/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
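/* A worked example of the macro above (an illustration, not part of
   the original sources), assuming a 64-bit HOST_WIDE_INT:
   HWI_SIGN_EXTEND (-5) yields (HOST_WIDE_INT) -1 (all bits set) and
   HWI_SIGN_EXTEND (5) yields 0, so the pair (low, HWI_SIGN_EXTEND (low))
   represents LOW sign-extended to a double-width (low, high) value.  */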
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
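/* For instance (an illustration, not part of the original file): in
   SImode the sign bit is 1 << 31, so mode_signbit_p returns true for
   a CONST_INT with value 0x80000000 and false for any constant with
   additional or different bits set.  */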
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
	{
	default:
	  decl = NULL;
	  break;

	case VAR_DECL:
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	case COMPONENT_REF:
	case BIT_FIELD_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	case VIEW_CONVERT_EXPR:
	  {
	    HOST_WIDE_INT bitsize, bitpos;
	    tree toffset;
	    int unsignedp = 0, volatilep = 0;

	    decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
					&mode, &unsignedp, &volatilep, false);
	    if (bitsize != GET_MODE_BITSIZE (mode)
		|| (bitpos % BITS_PER_UNIT)
		|| (toffset && !host_integerp (toffset, 0)))
	      decl = NULL;
	    else
	      {
		offset += bitpos / BITS_PER_UNIT;
		if (toffset)
		  offset += TREE_INT_CST_LOW (toffset);
	      }
	    break;
	  }
	}

      if (decl
	  && mode == GET_MODE (x)
	  && TREE_CODE (decl) == VAR_DECL
	  && (TREE_STATIC (decl)
	      || DECL_THREAD_LOCAL_P (decl))
	  && DECL_RTL_SET_P (decl)
	  && MEM_P (DECL_RTL (decl)))
	{
	  rtx newx;

	  offset += INTVAL (MEM_OFFSET (x));

	  newx = DECL_RTL (decl);

	  if (MEM_P (newx))
	    {
	      rtx n = XEXP (newx, 0), o = XEXP (x, 0);

	      /* Avoid creating a new MEM needlessly if we already had
		 the same address.  We do if there's no OFFSET and the
		 old address X is identical to NEWX, or if X is of the
		 form (plus NEWX OFFSET), or the NEWX is of the form
		 (plus Y (const_int Z)) and X is that with the offset
		 added: (plus Y (const_int Z+OFFSET)).  */
	      if (!((offset == 0
		     || (GET_CODE (o) == PLUS
			 && GET_CODE (XEXP (o, 1)) == CONST_INT
			 && (offset == INTVAL (XEXP (o, 1))
			     || (GET_CODE (n) == PLUS
				 && GET_CODE (XEXP (n, 1)) == CONST_INT
				 && (INTVAL (XEXP (n, 1)) + offset
				     == INTVAL (XEXP (o, 1)))
				 && (n = XEXP (n, 0))))
			 && (o = XEXP (o, 0))))
		    && rtx_equal_p (o, n)))
		x = adjust_address_nv (newx, mode, offset);
	    }
	  else if (GET_MODE (x) == GET_MODE (newx)
		   && offset == 0)
	    x = newx;
	}
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
			 rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
	return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
	  op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
	vec = XVEC (x, i);
	newvec = XVEC (newx, i);
	for (j = 0; j < GET_NUM_ELEM (vec); j++)
	  {
	    op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
					  old_rtx, fn, data);
	    if (op != RTVEC_ELT (vec, j))
	      {
		if (newvec == vec)
		  {
		    newvec = shallow_copy_rtvec (vec);
		    if (x == newx)
		      newx = shallow_copy_rtx (x);
		    XVEC (newx, i) = newvec;
		  }
		RTVEC_ELT (newvec, j) = op;
	      }
	  }
	break;

      case 'e':
	if (XEXP (x, i))
	  {
	    op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
	    if (op != XEXP (x, i))
	      {
		if (x == newx)
		  newx = shallow_copy_rtx (x);
		XEXP (newx, i) = op;
	      }
	  }
	break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
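/* Usage sketch (an illustration, not part of the original file): if
   X is (plus:SI (reg:SI 60) (const_int 4)) and OLD_RTX is that
   (reg:SI 60), then simplify_replace_rtx (x, old_rtx, GEN_INT (8))
   substitutes the register and folds the resulting constant sum,
   yielding (const_int 12).  */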
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && CONST_INT_P (XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && CONST_INT_P (XEXP (op, 1))
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
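      /* Example of the De Morgan rewrite above (illustration only, not
	 from the original sources): (not (and X Y)) becomes
	 (ior (not X) (not Y)) and (not (ior X Y)) becomes
	 (and (not X) (not Y)); if exactly one operand was already a
	 NOT, it ends up as the first operand of the result.  */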
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (CONST_INT_P (XEXP (op, 1))
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
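      /* Concrete instance of the (neg (lt ...)) rules above
	 (illustration only, not from the original sources): in SImode
	 with STORE_FLAG_VALUE == 1, (neg (lt X 0)) becomes
	 (ashiftrt X 31), which replicates the sign bit across the
	 word; with STORE_FLAG_VALUE == -1 it becomes (lshiftrt X 31).  */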
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
								  0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
		  && ((unsigned)significand_size (GET_MODE (op))
		      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
			  - num_sign_bit_copies (XEXP (op, 0),
						 GET_MODE (XEXP (op, 0))))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
      */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((unsigned HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == SIGN_EXTEND
	       || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == SIGN_EXTEND
		  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == SIGN_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
	    }
	}
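      /* Worked example of the widening-multiply rule above
	 (illustration only, not from the original sources):
	 (sign_extend:DI (mult:SI (sign_extend:SI x:HI)
				  (sign_extend:SI y:HI)))
	 needs at most 16 + 16 = 32 bits, so it can be rewritten as the
	 wider widening multiply
	 (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)).  */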
      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (sign_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.
	 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  gcc_assert (GET_MODE_BITSIZE (mode)
		      > GET_MODE_BITSIZE (GET_MODE (op)));
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
					 ? SIGN_EXTEND : ZERO_EXTEND,
					 mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* Extending a widening multiplication should be canonicalized to
	 a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
	{
	  rtx lhs = XEXP (op, 0);
	  rtx rhs = XEXP (op, 1);
	  enum rtx_code lcode = GET_CODE (lhs);
	  enum rtx_code rcode = GET_CODE (rhs);

	  /* Widening multiplies usually extend both operands, but sometimes
	     they use a shift to extract a portion of a register.  */
	  if ((lcode == ZERO_EXTEND
	       || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
	      && (rcode == ZERO_EXTEND
		  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
	    {
	      enum machine_mode lmode = GET_MODE (lhs);
	      enum machine_mode rmode = GET_MODE (rhs);
	      int bits;

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
	      else /* rcode == ZERO_EXTEND */
		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematically
		 equivalent.  I.e. if overflow was impossible.  */
	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			 (MULT, mode,
			  simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
			  simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
	    }
	}

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
	 is (zero_extend:M (subreg:O <X>)) if there is mode with
	 GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 0)) == ASHIFT
	  && CONST_INT_P (XEXP (op, 1))
	  && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
	  && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
	{
	  enum machine_mode tmode
	    = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
			     - INTVAL (XEXP (op, 1)), MODE_INT, 1);
	  if (tmode != BLKmode)
	    {
	      rtx inner =
		rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
	      return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
	    }
	}

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  arg0 &= GET_MODE_MASK (mode);
	  val = ffs_hwi (arg0);
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = ctz_hwi (arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
			   << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((unsigned HOST_WIDE_INT) (-1)
			   << GET_MODE_BITSIZE (op_mode));
	      if (val & ((unsigned HOST_WIDE_INT) 1
			 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val
		  -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
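  /* A couple of concrete folds from the switch above (illustration
     only, not from the original sources): in SImode,
     (popcount (const_int 7)) folds to (const_int 3), and
     (bswap (const_int 0x12345678)) folds to (const_int 0x78563412);
     gen_int_mode then canonicalizes the value for MODE.  */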
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 != 0)
	    lv = ffs_hwi (l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
	  else
	    lv = 0;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = ctz_hwi (l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    hv = 0;
	    lv = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (l1 >> s) & 0xff;
		else
		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  lv |= byte << d;
		else
		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((unsigned HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode)
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = real_value_abs (&d);
	  break;
	case NEG:
	  d = real_value_negate (&d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode, unless changing
	     mode class.  */
	  if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
	    real_convert (&d, mode, &d);
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (unsigned HOST_WIDE_INT) (-1)
		   << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
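/* Example of the canonicalizations above (illustration only, not from
   the original sources): for (plus (plus x (const_int 1)) (const_int 2)),
   the "(a op b) op c" -> "a op (b op c)" step folds the two constants
   and yields (plus x (const_int 3)).  */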
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, coeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  coeff1 = double_int_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = double_int_minus_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, coeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : 0;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
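      /* Worked instance of the X * C + X distribution above
	 (illustration only, not from the original sources): for
	 (plus (mult x (const_int 3)) x), coeff0 = 3 and coeff1 = 1,
	 so the pair combines to (mult x (const_int 4)) as long as
	 rtx_cost says the multiply is no more expensive than the
	 original sum.  */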
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;
2065 /* We can't assume x-x is 0 even with non-IEEE floating point,
2066 but since it is zero except in very strange circumstances, we
2067 will treat it as zero with -ffinite-math-only. */
2068 if (rtx_equal_p (trueop0
, trueop1
)
2069 && ! side_effects_p (op0
)
2070 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2071 return CONST0_RTX (mode
);
2073 /* Change subtraction from zero into negation. (0 - x) is the
2074 same as -x when x is NaN, infinite, or finite and nonzero.
2075 But if the mode has signed zeros, and does not round towards
2076 -infinity, then 0 - 0 is 0, not -0. */
2077 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2078 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2080 /* (-1 - a) is ~a. */
2081 if (trueop0
== constm1_rtx
)
2082 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2084 /* Subtracting 0 has no effect unless the mode has signed zeros
2085 and supports rounding towards -infinity. In such a case,
2087 if (!(HONOR_SIGNED_ZEROS (mode
)
2088 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2089 && trueop1
== CONST0_RTX (mode
))
2092 /* See if this is something like X * C - X or vice versa or
2093 if the multiplication is written as a shift. If so, we can
2094 distribute and make a new multiply, shift, or maybe just
2095 have X (if C is 2 in the example above). But don't make
2096 something more expensive than we had before. */
      if (SCALAR_INT_MODE_P (mode))
	{
	  double_int coeff0, negcoeff1;
	  rtx lhs = op0, rhs = op1;

	  coeff0 = double_int_one;
	  negcoeff1 = double_int_minus_one;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = double_int_minus_one;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0 = double_int_setbit (double_int_zero,
					  INTVAL (XEXP (lhs, 1)));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = double_int_one;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1 = double_int_setbit (double_int_zero,
					     INTVAL (XEXP (rhs, 1)));
	      negcoeff1 = double_int_neg (negcoeff1);
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      double_int val;
	      bool speed = optimize_function_for_speed_p (cfun);

	      val = double_int_add (coeff0, negcoeff1);
	      coeff = immed_double_int_const (val, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
		? tem : 0;
	    }
	}
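      /* For instance, (minus (mult X 3) X) gives coeff0 == 3 and
	 negcoeff1 == -1, so it folds to (mult X 2) -- kept only when
	 the new multiply costs no more than the original MINUS.  */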
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_INT_P (op1)
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
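      /* The identity holds bitwise: the set bits of (x & y) are a subset
	 of the set bits of x, so the subtraction never borrows; it just
	 clears the bits of x that are also in y, which is x & ~y.  */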
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
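      /* For example, (mult X 8) becomes (ashift X 3), since 8 == 1 << 3
	 and exact_log2 (8) == 3.  */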
      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && (GET_MODE (trueop1) == VOIDmode
	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (CONST_INT_P (trueop1)
	  && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
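      /* For example, with C1 == 15 and C2 == 5, (ior (and X 15) 5)
	 becomes (ior (and X 10) 5): bits forced on by C2 need not
	 survive the AND.  */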
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (UINTVAL (XEXP (op0, 1))
						    & ~UINTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (CONST_INT_P (trueop1)
	  && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
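      /* XOR with the sign bit and addition of the sign bit agree when
	 arithmetic wraps modulo 2**width: adding the sign bit cannot
	 carry past the top bit, so it simply flips it, which is
	 exactly what the XOR does.  */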
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  rtx na_c
	    = simplify_binary_operation (AND, mode,
					 simplify_gen_unary (NOT, mode, a, mode),
					 c);
	  if ((~cval & bval) == 0)
	    {
	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    GEN_INT (~bval & cval));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (na_c == const0_rtx)
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    GEN_INT (~cval & bval));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      GEN_INT (~bval & cval));
		}
	    }
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
	      == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}
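      /* For example, (and (sign_extend:SI X:QI) 0x7f) becomes
	 (zero_extend:SI (and:QI X 0x7f)): the mask leaves no bits set
	 outside QImode, so the kind of extension cannot matter.  */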
      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  enum machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
			 == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
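      /* For example, (udiv X 8) becomes (lshiftrt X 3); the test is
	 "> 0" so that division by 1 is left to the case above.  */
      break;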
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
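      /* For example, (umod X 8) becomes (and X 7): for a power of two
	 divisor the remainder is just the low-order bits.  */
      break;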
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
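      /* On a SHIFT_COUNT_TRUNCATED target, e.g. a 32-bit rotate by 33
	 is rewritten as a rotate by 1, since 33 & 31 == 1.  */
      break;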
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT) width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_BITSIZE (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
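      /* When CLZ of zero is defined to be exactly the bitsize, it is
	 the only CLZ value with bit log2 (bitsize) set -- nonzero
	 inputs give values below the bitsize -- so the shifted result
	 is 1 iff X is zero.  */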
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width - 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offset memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      enum machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  enum machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }

	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}

      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      return 0;
    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_INT_P (trueop0)
	     || GET_CODE (trueop0) == CONST_DOUBLE)
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_INT_P (trueop1)
		|| GET_CODE (trueop1) == CONST_DOUBLE))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Fold a binary operation CODE on constant operands OP0 and OP1 in
   MODE, returning the folded constant, or 0 if nothing folds.  */

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
	  || GET_CODE (op0) == CONST_DOUBLE
	  || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
	  || GET_CODE (op1) == CONST_DOUBLE
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  o1 = double_int_neg (o1);

	  /* Fall through....  */

	case PLUS:
	  res = double_int_add (o0, o1);
	  break;

	case MULT:
	  res = double_int_mul (o0, o1);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
				    o0.low, o0.high, o1.low, o1.high,
				    &res.low, &res.high,
				    &tmp.low, &tmp.high))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
				    o0.low, o0.high, o1.low, o1.high,
				    &tmp.low, &tmp.high,
				    &res.low, &res.high))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
				    o0.low, o0.high, o1.low, o1.high,
				    &res.low, &res.high,
				    &tmp.low, &tmp.high))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
				    o0.low, o0.high, o1.low, o1.high,
				    &tmp.low, &tmp.high,
				    &res.low, &res.high))
	    return 0;
	  break;

	case AND:
	  res = double_int_and (o0, o1);
	  break;

	case IOR:
	  res = double_int_ior (o0, o1);
	  break;

	case XOR:
	  res = double_int_xor (o0, o1);
	  break;

	case SMIN:
	  res = double_int_smin (o0, o1);
	  break;

	case SMAX:
	  res = double_int_smax (o0, o1);
	  break;

	case UMIN:
	  res = double_int_umin (o0, o1);
	  break;

	case UMAX:
	  res = double_int_umax (o0, o1);
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  {
	    unsigned HOST_WIDE_INT cnt;

	    if (SHIFT_COUNT_TRUNCATED)
	      o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));

	    if (!double_int_fits_in_uhwi_p (o1)
		|| double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
	      return 0;

	    cnt = double_int_to_uhwi (o1);

	    if (code == LSHIFTRT || code == ASHIFTRT)
	      res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
				       code == ASHIFTRT);
	    else if (code == ASHIFT)
	      res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
				       true);
	    else if (code == ROTATE)
	      res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
	    else /* code == ROTATERT */
	      res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
	  }
	  break;

	default:
	  return 0;
	}

      return immed_double_int_const (res, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	  arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

	  arg0s = arg0;
	  if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
	    arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);

	  arg1s = arg1;
	  if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
	    arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}
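      /* E.g. for width == 8, arg0 == 0xff gives arg0s == -1: bit 7 is
	 set, so all the bits above WIDTH are filled with ones.  */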
      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;

	case AND:
	  val = arg0 & arg1;
	  break;

	case IOR:
	  val = arg0 | arg1;
	  break;

	case XOR:
	  val = arg0 ^ arg1;
	  break;

	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
	  break;

	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_MULT:
	case US_MULT:
	case SS_DIV:
	case US_DIV:
	case SS_ASHIFT:
	case US_ASHIFT:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }
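  /* This is the usual unsigned overflow check: (ltu (plus a C) C) is
     true exactly when the addition wraps, i.e. when a is at least
     2**width - C, which is (geu a (neg C)).  */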
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
			       cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
	  || GET_CODE (op1) == CONST_DOUBLE)
      && (CONST_INT_P (XEXP (op0, 1))
	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));
  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
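  /* Editor's illustration (not from the original source): for the
     constants 1.0 and 2.0, REAL_VALUES_LESS holds and CMP_LT is passed
     on, so (lt 1.0 2.0) folds to const_true_rtx; with a NaN operand the
     switch above sends the UN* codes, NE and UNORDERED to const_true_rtx
     and the ordered codes to const0_rtx.  */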
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);

          if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
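  /* Editor's illustration (not from the original source): comparing
     (const_int -1) with (const_int 1) yields CMP_LT | CMP_GTU, since
     -1 < 1 as a signed value but its unsigned image is the all-ones
     word.  Hence (lt -1 1) folds to const_true_rtx while (ltu -1 1)
     folds to const0_rtx.  */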
  /* Optimize comparisons with upper and lower bounds.  */
  if (SCALAR_INT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }
      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
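  /* Editor's illustration (not from the original source): if
     nonzero_bits proves a QImode value lies in [0, 15], then mmin = 0
     and mmax = 15, so (gtu x (const_int 15)) folds to const0_rtx and
     (leu x (const_int 15)) folds to const_true_rtx.  */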
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
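  /* Editor's illustration (not from the original source, assuming
     32-bit SImode): (ior:SI x C) with the sign bit set in C is always
     negative, so (lt:SI (ior:SI x C) (const_int 0)) folds to
     const_true_rtx via the has_sign case above.  */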
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications are possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      break;
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1
                             << (INTVAL (op1) - 1))) != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

          return gen_int_mode (val, mode);
        }
      break;
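      /* Editor's illustration (not from the original source, assuming
         BITS_BIG_ENDIAN is 0): extracting four bits at offset 4 from
         (const_int 0xab) gives 0xa, so the ZERO_EXTRACT variant folds
         to (const_int 10) and the sign-propagating SIGN_EXTRACT variant
         to (const_int -6).  */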
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
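      /* Editor's illustration (not from the original source): on a
         target with STORE_FLAG_VALUE == 1,
         (if_then_else (lt a b) (const_int 1) (const_int 0)) reduces to
         the comparison (lt a b) itself; with the constants swapped, the
         reversed comparison (ge a b) is generated instead.  */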
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
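/* Editor's illustration (not from the original source): for a
   four-element VEC_MERGE with mask value 5 (binary 0101), elements 0
   and 2 are taken from OP0 and elements 1 and 3 from OP1, so merging
   the constant vectors [a b c d] and [e f g h] yields [a f c h].  */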
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;
        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;
        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                    << (i - HOST_BITS_PER_WIDE_INT);

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;
        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;
        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
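/* Editor's illustration (not from the original source, assuming a
   little-endian target): taking the lowpart QImode subreg of
   (const_int 0x1234) unpacks the constant into the byte array
   {0x34, 0x12, ...} and repacks byte 0, producing (const_int 0x34).  */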
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
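  /* Editor's illustration (not from the original source, little-endian
     offsets): the rule above folds (subreg:QI (subreg:HI (reg:SI r) 0) 0)
     into the single operation (subreg:QI (reg:SI r) 0).  */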
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that can not
             grok partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
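  /* Editor's illustration (not from the original source, assuming a
     little-endian target): all three rules above perform the shift in
     the narrow mode, e.g.
     (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI r)) (const_int 2)) 0)
     becomes (lshiftrt:QI (reg:QI r) (const_int 2)).  */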
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }
  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
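/* Editor's sketch of typical use (hypothetical caller, little-endian
   offsets): to take the low SImode word of a DImode value X, a pass
   would call

     simplify_gen_subreg (SImode, x, DImode,
                          subreg_lowpart_offset (SImode, DImode));

   which returns a folded rtx when possible and otherwise the explicit
   (subreg:SI x 0).  */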
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))