/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
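/* Illustrative example (not in the original source): the (low, high)
   pair for the wide value -10 stores an all-ones-but-low-bits pattern
   in LOW and -1 in HIGH.  Reinterpreting that LOW word as a signed
   HOST_WIDE_INT makes it negative, so HWI_SIGN_EXTEND (low) yields
   -1, reproducing the HIGH word a true sign extension would give;
   for a non-negative LOW it yields 0.  */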
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
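/* Illustrative example (not in the original source): in QImode the
   most negative value is -128; negating it overflows, since +128 is
   not representable in 8 bits.  gen_int_mode truncates back to
   QImode, so neg_const_int (QImode, -128) yields -128 again,
   matching two's complement wrap-around.  */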
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
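/* Illustrative example (not in the original source): for 32-bit
   SImode, the sign bit constant is (const_int -2147483648); after
   masking to 32 bits its value is 1 << 31, so mode_signbit_p returns
   true, while (const_int 0x40000000) returns false.  */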
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
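/* Illustrative example (not in the original source): calling
   simplify_gen_binary (PLUS, SImode, const1_rtx, reg) cannot fold,
   so the commutative swap puts the REG first, yielding the canonical
   form (plus:SI reg (const_int 1)).  */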
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
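/* Illustrative example (not in the original source): given a MEM
   whose address is a SYMBOL_REF into the constant pool holding
   (const_double:DF 2.5), this function returns the CONST_DOUBLE
   itself, letting later folding operate on the constant value rather
   than on the memory reference.  */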
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
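/* Illustrative example (not in the original source): replacing
   (reg:SI 100) with (const_int 4) in (plus:SI (reg:SI 100)
   (const_int 1)) rebuilds the PLUS through simplify_gen_binary, so
   the result folds directly to (const_int 5).  */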
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      /* (not (subreg (ashift 1 X) 0)) is (subreg (rotate ~1 X) 0).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
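      /* Illustrative example (not in the original source): with the
         De Morgan rewrite above, (not (ior A B)) becomes
         (and (not A) (not B)); e.g. ~(x | 0xff) turns into
         ~x & ~0xff, which matches and-not patterns on machines that
         have them.  */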
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
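      /* Illustrative example (not in the original source): with
         STORE_FLAG_VALUE == 1 and a 32-bit operand,
         (neg:SI (lt:SI (reg:SI r) (const_int 0))) becomes
         (ashiftrt:SI (reg:SI r) (const_int 31)): when r is negative
         the comparison yields 1 and its negation is -1, exactly what
         the arithmetic right shift of the sign bit produces.  */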
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
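      /* Illustrative example (not in the original source):
         (truncate:SI (sign_extend:DI (reg:SI r))) folds back to
         (reg:SI r), since extending and then truncating to the
         original width is an identity.  */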
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((unsigned HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                         ? SIGN_EXTEND : ZERO_EXTEND,
                                         mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
                           << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((unsigned HOST_WIDE_INT) (-1)
                           << GET_MODE_BITSIZE (op_mode));
              if (val & ((unsigned HOST_WIDE_INT) 1
                         << (GET_MODE_BITSIZE (op_mode) - 1)))
                val
                  -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
        case US_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
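  /* Illustrative example (not in the original source): the BSWAP
     loop above reverses byte order, so for a 32-bit value 0x12345678
     it extracts bytes 0x78, 0x56, 0x34, 0x12 from the low end and
     deposits them at the high end, producing 0x78563412.  */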
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 != 0)
            lv = ffs_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
          else
            lv = 0;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = ctz_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((unsigned HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (unsigned HOST_WIDE_INT) (-1)
                   << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
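/* Illustrative example (not in the original source): folding
   (fix:SI (const_double:DF 3.0e9)) under the bounds tests above
   clamps the out-of-range value to the signed upper bound, giving
   (const_int 2147483647); a NaN operand folds to (const_int 0).  */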
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
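/* Illustrative example (not in the original source): for
   (plus (plus (reg:SI r) (const_int 1)) (const_int 2)), the
   "(a op b) op c" -> "a op (b op c)" attempt folds the two
   constants, so the whole expression simplifies to
   (plus (reg:SI r) (const_int 3)).  */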
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
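/* Illustrative example (not in the original source): calling
   simplify_binary_operation (PLUS, SImode, GEN_INT (2), GEN_INT (3))
   folds through simplify_const_binary_operation to (const_int 5),
   while (plus (reg) (reg)) returns 0 unless one of the non-constant
   rules in simplify_binary_operation_1 applies.  */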
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, coeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
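      /* Illustrative example (not in the original source): in the
         distribution code above, (plus (mult (reg:SI r) (const_int 3))
         (reg:SI r)) has coeff0 == 3 and coeff1 == 1 with equal bases,
         so it becomes (mult (reg:SI r) (const_int 4)) provided the
         rtx_cost comparison does not rate the MULT as more expensive
         than the original expression.  */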
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, negcoeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          negcoeff1 = double_int_minus_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = double_int_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1 = double_int_setbit (double_int_zero,
                                             INTVAL (XEXP (rhs, 1)));
              negcoeff1 = double_int_neg (negcoeff1);
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, negcoeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && (GET_MODE (trueop1) == VOIDmode
	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
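      /* For example, (mult:SI x (const_int 8)) becomes
	 (ashift:SI x (const_int 3)) here, since 8 == 1 << 3.  */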
      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (CONST_INT_P (trueop1)
	  && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
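      /* E.g. with C1 = 0xff and C2 = 0x0f, the last rule rewrites
	 (X & 0xff) | 0x0f as (X & 0xf0) | 0x0f, dropping from C1 the
	 bits that C2 already provides.  */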
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
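      /* For instance, in SImode (ior (ashift A (const_int 24))
	 (lshiftrt A (const_int 8))) has 24 + 8 == 32 and so becomes
	 (rotate A (const_int 24)).  */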
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (UINTVAL (XEXP (op0, 1))
						    & ~UINTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (CONST_INT_P (trueop1)
	  && ((UINTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
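      /* The two forms agree because adding the sign bit can only flip
	 the top bit and drop the carry: e.g. in QImode, x ^ 0x80 and
	 x + 0x80 produce the same 8-bit result for every x.  */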
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (CONST_INT_P (XEXP (op0, 1))
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode,
							  op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
	      == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}
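      /* This is correct for either extension: C has no bits outside X's
	 mode, so the AND clears every bit above that mode, and the whole
	 result is just the narrow (and X C) zero-extended.  */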
      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  enum machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
			 == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && op0 == XEXP (XEXP (op1, 0), 0))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && op1 == XEXP (XEXP (op0, 0), 0))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
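      /* E.g. (udiv:SI x (const_int 4)) becomes
	 (lshiftrt:SI x (const_int 2)); the shift is exact for unsigned
	 division, unlike the signed case, which needs rounding fixups.  */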
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
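      /* E.g. (umod:SI x (const_int 8)) becomes (and:SI x (const_int 7)),
	 since for unsigned x, x % 8 == x & 7.  */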
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;
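      /* On targets that define SHIFT_COUNT_TRUNCATED, e.g. a 32-bit shift
	 by 33 is rewritten here as a shift by 1, matching what the
	 hardware does with out-of-range counts.  */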
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_BITSIZE (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width - 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract a scalar element from a vector using a chain of
	     nested VEC_SELECT expressions.  When the input operand is a
	     memory operand, this operation can be simplified to a simple
	     scalar load from an offset memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      enum machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select the element pointed to by the nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when the nested VEC_SELECT wraps a
		 VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  enum machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out the number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select the correct operand of the VEC_CONCAT
		     and adjust the selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }
	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}

      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}

      return 0;
    case VEC_CONCAT:
      {
	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				      ? GET_MODE (trueop0)
				      : GET_MODE_INNER (mode));
	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				      ? GET_MODE (trueop1)
				      : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_INT_P (trueop0)
	     || GET_CODE (trueop0) == CONST_DOUBLE)
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_INT_P (trueop1)
		|| GET_CODE (trueop1) == CONST_DOUBLE))
	  {
	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
	  || GET_CODE (op0) == CONST_DOUBLE
	  || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
	  || GET_CODE (op1) == CONST_DOUBLE
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return 0;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  o1 = double_int_neg (o1);

	  /* Fall through....  */

	case PLUS:
	  res = double_int_add (o0, o1);
	  break;

	case MULT:
	  res = double_int_mul (o0, o1);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
				    o0.low, o0.high, o1.low, o1.high,
				    &res.low, &res.high,
				    &tmp.low, &tmp.high))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0,
				    o0.low, o0.high, o1.low, o1.high,
				    &tmp.low, &tmp.high,
				    &res.low, &res.high))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
				    o0.low, o0.high, o1.low, o1.high,
				    &res.low, &res.high,
				    &tmp.low, &tmp.high))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1,
				    o0.low, o0.high, o1.low, o1.high,
				    &tmp.low, &tmp.high,
				    &res.low, &res.high))
	    return 0;
	  break;

	case AND:
	  res = double_int_and (o0, o1);
	  break;

	case IOR:
	  res = double_int_ior (o0, o1);
	  break;

	case XOR:
	  res = double_int_xor (o0, o1);
	  break;

	case SMIN:
	  res = double_int_smin (o0, o1);
	  break;

	case SMAX:
	  res = double_int_smax (o0, o1);
	  break;

	case UMIN:
	  res = double_int_umin (o0, o1);
	  break;

	case UMAX:
	  res = double_int_umax (o0, o1);
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  {
	    unsigned HOST_WIDE_INT cnt;

	    if (SHIFT_COUNT_TRUNCATED)
	      o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));

	    if (!double_int_fits_in_uhwi_p (o1)
		|| double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
	      return 0;

	    cnt = double_int_to_uhwi (o1);

	    if (code == LSHIFTRT || code == ASHIFTRT)
	      res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
				       code == ASHIFTRT);
	    else if (code == ASHIFT)
	      res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
				       true);
	    else if (code == ROTATE)
	      res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
	    else /* code == ROTATERT */
	      res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
	  }
	  break;

	default:
	  return 0;
	}

      return immed_double_int_const (res, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	  arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

	  arg0s = arg0;
	  if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
	    arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width);

	  arg1s = arg1;
	  if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
	    arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}

      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || ((unsigned HOST_WIDE_INT) arg0s
		  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;

	case AND:
	  val = arg0 & arg1;
	  break;

	case IOR:
	  val = arg0 | arg1;
	  break;

	case XOR:
	  val = arg0 ^ arg1;
	  break;

	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
	  break;

	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_MULT:
	case US_MULT:
	case SS_DIV:
	case US_DIV:
	case SS_ASHIFT:
	case US_ASHIFT:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
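/* For example, given (minus a (plus b c)), the expansion loop below
   flattens the expression into the signed operand list { +a, -b, -c }
   before re-sorting and recombining it.  */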
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  j = i - 1;
	  if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	    continue;

	  canonicalized = 1;
	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      /* If nothing changed, fail.  */
      if (!canonicalized)
	return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }
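  /* This is the usual unsigned-overflow idiom: e.g. (ltu (plus a 5)
     (const_int 5)) tests whether a + 5 wrapped around, which is
     equivalent to (geu a (const_int -5)).  */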
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
			       cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }
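  /* For instance, (eq (plus x (const_int 4)) (const_int 10)) folds
     to (eq x (const_int 6)) here.  */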
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
	  || GET_CODE (op1) == CONST_DOUBLE)
      && (CONST_INT_P (XEXP (op0, 1))
	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
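/* For example, comparison_result (LE, CMP_LT | CMP_LTU) yields
   const_true_rtx: a known strictly-less result implies less-or-equal.  */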
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;
  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((unsigned HOST_WIDE_INT) (-1) << width);

          if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((unsigned HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
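
  /* Worked example (editorial): comparing (const_int -1) with
     (const_int 255) in QImode.  width == 8, so both low words are masked
     to 0xff and the high words become zero; h0u == h1u and l0u == l1u,
     so an (eq ...) of the two folds to const_true_rtx even though the
     host representations of the two constants differ.  */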
  /* Optimize comparisons with upper and lower bounds.  */
  if (SCALAR_INT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }
      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
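
  /* Editorial example: for (gtu:SI (zero_extend:SI (reg:QI 60))
     (const_int 255)), nonzero_bits reports at most 0xff, so the reduced
     range is mmin == 0, mmax == 255.  val == 255 satisfies val >= mmax in
     the GTU arm and the comparison folds to const0_rtx: an unsigned byte
     can never exceed 255.  */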
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }
      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
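
  /* Editorial example: for (lt:SI (ior:SI (reg:SI 60)
     (const_int -2147483648)) (const_int 0)), the IOR forces the sign bit,
     so has_sign is nonzero and the LT arm above folds the comparison to
     const_true_rtx.  */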
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
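
/* Editorial example for the ABS rules above: (ge:DF (abs:DF (reg:DF 60))
   (const_double 0.0)) folds to const_true_rtx when NaNs need not be
   honored, since |x| >= 0 holds for every non-NaN value.  */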
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
      return NULL_RTX;
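
      /* Editorial example: (fma:DF (neg:DF a) (neg:DF b) c) becomes
         (fma:DF a b c); folding NEG around NEG cancels the two negations,
         and any_change records that a fresh FMA rtx must be built.  */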
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1
                             << (INTVAL (op1) - 1))) != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((unsigned HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;

          return gen_int_mode (val, mode);
        }
      break;
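
      /* Worked example (editorial): (zero_extract:SI (const_int 0x1234)
         (const_int 4) (const_int 4)) with !BITS_BIG_ENDIAN shifts the
         value right by 4 and masks with 0xf, extracting bits 4..7 of
         0x1234, so the result is (const_int 3).  */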
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
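
      /* Editorial example, assuming STORE_FLAG_VALUE == 1: (if_then_else
         (lt:SI a b) (const_int 1) (const_int 0)) folds directly to
         (lt:SI a b), while (if_then_else (lt:SI a b) (const_int 0)
         (const_int 1)) uses the reversed comparison and, for an integer
         comparison, folds to (ge:SI a b).  */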
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;

          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
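
/* Editorial example for VEC_MERGE: (vec_merge:V4SI op0 op1 (const_int 5))
   with two CONST_VECTOR operands selects elements 0 and 2 from op0 (mask
   bits 0b0101) and elements 1 and 3 from op1, producing a new
   CONST_VECTOR.  */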
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;
        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;
        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
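
  /* Worked example (editorial): taking the SImode subreg at byte 4 of a
     DImode constant on a little-endian target selects the high 32 bits.
     The renumbering above leaves byte == 4, so value_start skips the four
     least-significant bytes of the unpacked array.  */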
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                    << (i - HOST_BITS_PER_WIDE_INT);

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;
        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;
        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of op.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
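
  /* Editorial example: (subreg:QI (truncate:HI (reg:SI 60)) 0) at the
     lowpart offset becomes a single (truncate:QI (reg:SI 60)).  */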
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that can not
             grok partial register anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
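
  /* Editorial example of the three rules above: (subreg:QI (ashift:SI
     (zero_extend:SI (reg:QI 60)) (const_int 2)) 0) folds to
     (ashift:QI (reg:QI 60) (const_int 2)); the shift count 2 is smaller
     than the QImode width, so the subreg is just a truncation back to the
     mode the value started in.  */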
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }
  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
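
/* Usage sketch (editorial, illustrative only): extracting the low word of
   a DImode value can be written as

     simplify_gen_subreg (SImode, op, DImode,
                          subreg_lowpart_offset (SImode, DImode));

   which folds to a constant, a renumbered hard register, or a fresh
   SUBREG as appropriate, or returns NULL_RTX if no valid SUBREG can be
   made.  */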
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))