1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
/* Sign-extend LOW into the "high" half of a (low, high) pair: expands
   to all-ones if LOW is negative when interpreted as a signed
   HOST_WIDE_INT, and to zero otherwise.  The argument is parenthesized
   so that expression arguments (e.g. "a + b") bind correctly under the
   cast.  LOW is evaluated exactly once.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx
neg_const_int (enum machine_mode
, const_rtx
);
53 static bool plus_minus_operand_p (const_rtx
);
54 static bool simplify_plus_minus_op_data_cmp (rtx
, rtx
);
55 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
, rtx
);
56 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
58 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
60 static rtx
simplify_relational_operation_1 (enum rtx_code
, enum machine_mode
,
61 enum machine_mode
, rtx
, rtx
);
62 static rtx
simplify_unary_operation_1 (enum rtx_code
, enum machine_mode
, rtx
);
63 static rtx
simplify_binary_operation_1 (enum rtx_code
, enum machine_mode
,
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
69 neg_const_int (enum machine_mode mode
, const_rtx i
)
71 return gen_int_mode (- INTVAL (i
), mode
);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
78 mode_signbit_p (enum machine_mode mode
, const_rtx x
)
80 unsigned HOST_WIDE_INT val
;
83 if (GET_MODE_CLASS (mode
) != MODE_INT
)
86 width
= GET_MODE_BITSIZE (mode
);
90 if (width
<= HOST_BITS_PER_WIDE_INT
93 else if (width
<= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x
) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x
) == 0)
97 val
= CONST_DOUBLE_HIGH (x
);
98 width
-= HOST_BITS_PER_WIDE_INT
;
103 if (width
< HOST_BITS_PER_WIDE_INT
)
104 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
105 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
117 /* If this simplifies, do it. */
118 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
122 /* Put complex operands first and constants second if commutative. */
123 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
124 && swap_commutative_operands_p (op0
, op1
))
125 tem
= op0
, op0
= op1
, op1
= tem
;
127 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x
)
136 enum machine_mode cmode
;
137 HOST_WIDE_INT offset
= 0;
139 switch (GET_CODE (x
))
145 /* Handle float extensions of constant pool references. */
147 c
= avoid_constant_pool_reference (tmp
);
148 if (c
!= tmp
&& GET_CODE (c
) == CONST_DOUBLE
)
152 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
161 if (GET_MODE (x
) == BLKmode
)
166 /* Call target hook to avoid the effects of -fpic etc.... */
167 addr
= targetm
.delegitimize_address (addr
);
169 /* Split the address into a base and integer offset. */
170 if (GET_CODE (addr
) == CONST
171 && GET_CODE (XEXP (addr
, 0)) == PLUS
172 && CONST_INT_P (XEXP (XEXP (addr
, 0), 1)))
174 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
175 addr
= XEXP (XEXP (addr
, 0), 0);
178 if (GET_CODE (addr
) == LO_SUM
)
179 addr
= XEXP (addr
, 1);
181 /* If this is a constant pool reference, we can turn it into its
182 constant and hope that simplifications happen. */
183 if (GET_CODE (addr
) == SYMBOL_REF
184 && CONSTANT_POOL_ADDRESS_P (addr
))
186 c
= get_pool_constant (addr
);
187 cmode
= get_pool_mode (addr
);
189 /* If we're accessing the constant in a different mode than it was
190 originally stored, attempt to fix that up via subreg simplifications.
191 If that fails we have no choice but to return the original memory. */
192 if (offset
!= 0 || cmode
!= GET_MODE (x
))
194 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
195 if (tem
&& CONSTANT_P (tem
))
205 /* Simplify a MEM based on its attributes. This is the default
206 delegitimize_address target hook, and it's recommended that every
207 overrider call it. */
210 delegitimize_mem_from_attrs (rtx x
)
215 || GET_CODE (MEM_OFFSET (x
)) == CONST_INT
))
217 tree decl
= MEM_EXPR (x
);
218 enum machine_mode mode
= GET_MODE (x
);
219 HOST_WIDE_INT offset
= 0;
221 switch (TREE_CODE (decl
))
231 case ARRAY_RANGE_REF
:
236 case VIEW_CONVERT_EXPR
:
238 HOST_WIDE_INT bitsize
, bitpos
;
240 int unsignedp
= 0, volatilep
= 0;
242 decl
= get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
,
243 &mode
, &unsignedp
, &volatilep
, false);
244 if (bitsize
!= GET_MODE_BITSIZE (mode
)
245 || (bitpos
% BITS_PER_UNIT
)
246 || (toffset
&& !host_integerp (toffset
, 0)))
250 offset
+= bitpos
/ BITS_PER_UNIT
;
252 offset
+= TREE_INT_CST_LOW (toffset
);
259 && mode
== GET_MODE (x
)
260 && TREE_CODE (decl
) == VAR_DECL
261 && (TREE_STATIC (decl
)
262 || DECL_THREAD_LOCAL_P (decl
))
263 && DECL_RTL_SET_P (decl
)
264 && MEM_P (DECL_RTL (decl
)))
269 offset
+= INTVAL (MEM_OFFSET (x
));
271 newx
= DECL_RTL (decl
);
275 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
277 /* Avoid creating a new MEM needlessly if we already had
278 the same address. We do if there's no OFFSET and the
279 old address X is identical to NEWX, or if X is of the
280 form (plus NEWX OFFSET), or the NEWX is of the form
281 (plus Y (const_int Z)) and X is that with the offset
282 added: (plus Y (const_int Z+OFFSET)). */
284 || (GET_CODE (o
) == PLUS
285 && GET_CODE (XEXP (o
, 1)) == CONST_INT
286 && (offset
== INTVAL (XEXP (o
, 1))
287 || (GET_CODE (n
) == PLUS
288 && GET_CODE (XEXP (n
, 1)) == CONST_INT
289 && (INTVAL (XEXP (n
, 1)) + offset
290 == INTVAL (XEXP (o
, 1)))
291 && (n
= XEXP (n
, 0))))
292 && (o
= XEXP (o
, 0))))
293 && rtx_equal_p (o
, n
)))
294 x
= adjust_address_nv (newx
, mode
, offset
);
296 else if (GET_MODE (x
) == GET_MODE (newx
)
305 /* Make a unary operation by first seeing if it folds and otherwise making
306 the specified operation. */
309 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
310 enum machine_mode op_mode
)
314 /* If this simplifies, use it. */
315 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
318 return gen_rtx_fmt_e (code
, mode
, op
);
321 /* Likewise for ternary operations. */
324 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
325 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
329 /* If this simplifies, use it. */
330 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
334 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
337 /* Likewise, for relational operations.
338 CMP_MODE specifies mode comparison is done in. */
341 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
342 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
346 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
350 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
353 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
354 resulting RTX. Return a new RTX which is as simplified as possible. */
357 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
359 enum rtx_code code
= GET_CODE (x
);
360 enum machine_mode mode
= GET_MODE (x
);
361 enum machine_mode op_mode
;
364 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
365 to build a new expression substituting recursively. If we can't do
366 anything, return our input. */
371 switch (GET_RTX_CLASS (code
))
375 op_mode
= GET_MODE (op0
);
376 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
377 if (op0
== XEXP (x
, 0))
379 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
383 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
384 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
385 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
387 return simplify_gen_binary (code
, mode
, op0
, op1
);
390 case RTX_COMM_COMPARE
:
393 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
394 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
395 op1
= simplify_replace_rtx (op1
, old_rtx
, new_rtx
);
396 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
398 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
401 case RTX_BITFIELD_OPS
:
403 op_mode
= GET_MODE (op0
);
404 op0
= simplify_replace_rtx (op0
, old_rtx
, new_rtx
);
405 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
406 op2
= simplify_replace_rtx (XEXP (x
, 2), old_rtx
, new_rtx
);
407 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
409 if (op_mode
== VOIDmode
)
410 op_mode
= GET_MODE (op0
);
411 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
414 /* The only case we try to handle is a SUBREG. */
417 op0
= simplify_replace_rtx (SUBREG_REG (x
), old_rtx
, new_rtx
);
418 if (op0
== SUBREG_REG (x
))
420 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
421 GET_MODE (SUBREG_REG (x
)),
423 return op0
? op0
: x
;
430 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
431 if (op0
== XEXP (x
, 0))
433 return replace_equiv_address_nv (x
, op0
);
435 else if (code
== LO_SUM
)
437 op0
= simplify_replace_rtx (XEXP (x
, 0), old_rtx
, new_rtx
);
438 op1
= simplify_replace_rtx (XEXP (x
, 1), old_rtx
, new_rtx
);
440 /* (lo_sum (high x) x) -> x */
441 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
444 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
446 return gen_rtx_LO_SUM (mode
, op0
, op1
);
448 else if (code
== REG
)
450 if (rtx_equal_p (x
, old_rtx
))
461 /* Try to simplify a unary operation CODE whose output mode is to be
462 MODE with input operand OP whose mode was originally OP_MODE.
463 Return zero if no simplification can be made. */
465 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
466 rtx op
, enum machine_mode op_mode
)
470 if (GET_CODE (op
) == CONST
)
473 trueop
= avoid_constant_pool_reference (op
);
475 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
479 return simplify_unary_operation_1 (code
, mode
, op
);
482 /* Perform some simplifications we can do even if the operands
485 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
487 enum rtx_code reversed
;
493 /* (not (not X)) == X. */
494 if (GET_CODE (op
) == NOT
)
497 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
498 comparison is all ones. */
499 if (COMPARISON_P (op
)
500 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
501 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
502 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
503 XEXP (op
, 0), XEXP (op
, 1));
505 /* (not (plus X -1)) can become (neg X). */
506 if (GET_CODE (op
) == PLUS
507 && XEXP (op
, 1) == constm1_rtx
)
508 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
510 /* Similarly, (not (neg X)) is (plus X -1). */
511 if (GET_CODE (op
) == NEG
)
512 return plus_constant (XEXP (op
, 0), -1);
514 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
515 if (GET_CODE (op
) == XOR
516 && CONST_INT_P (XEXP (op
, 1))
517 && (temp
= simplify_unary_operation (NOT
, mode
,
518 XEXP (op
, 1), mode
)) != 0)
519 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
521 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
522 if (GET_CODE (op
) == PLUS
523 && CONST_INT_P (XEXP (op
, 1))
524 && mode_signbit_p (mode
, XEXP (op
, 1))
525 && (temp
= simplify_unary_operation (NOT
, mode
,
526 XEXP (op
, 1), mode
)) != 0)
527 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
530 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
531 operands other than 1, but that is not valid. We could do a
532 similar simplification for (not (lshiftrt C X)) where C is
533 just the sign bit, but this doesn't seem common enough to
535 if (GET_CODE (op
) == ASHIFT
536 && XEXP (op
, 0) == const1_rtx
)
538 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
539 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
542 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
543 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
544 so we can perform the above simplification. */
546 if (STORE_FLAG_VALUE
== -1
547 && GET_CODE (op
) == ASHIFTRT
548 && GET_CODE (XEXP (op
, 1))
549 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
550 return simplify_gen_relational (GE
, mode
, VOIDmode
,
551 XEXP (op
, 0), const0_rtx
);
554 if (GET_CODE (op
) == SUBREG
555 && subreg_lowpart_p (op
)
556 && (GET_MODE_SIZE (GET_MODE (op
))
557 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
558 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
559 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
561 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
564 x
= gen_rtx_ROTATE (inner_mode
,
565 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
567 XEXP (SUBREG_REG (op
), 1));
568 return rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
571 /* Apply De Morgan's laws to reduce number of patterns for machines
572 with negating logical insns (and-not, nand, etc.). If result has
573 only one NOT, put it first, since that is how the patterns are
576 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
578 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
579 enum machine_mode op_mode
;
581 op_mode
= GET_MODE (in1
);
582 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
584 op_mode
= GET_MODE (in2
);
585 if (op_mode
== VOIDmode
)
587 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
589 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
592 in2
= in1
; in1
= tem
;
595 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
601 /* (neg (neg X)) == X. */
602 if (GET_CODE (op
) == NEG
)
605 /* (neg (plus X 1)) can become (not X). */
606 if (GET_CODE (op
) == PLUS
607 && XEXP (op
, 1) == const1_rtx
)
608 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
610 /* Similarly, (neg (not X)) is (plus X 1). */
611 if (GET_CODE (op
) == NOT
)
612 return plus_constant (XEXP (op
, 0), 1);
614 /* (neg (minus X Y)) can become (minus Y X). This transformation
615 isn't safe for modes with signed zeros, since if X and Y are
616 both +0, (minus Y X) is the same as (minus X Y). If the
617 rounding mode is towards +infinity (or -infinity) then the two
618 expressions will be rounded differently. */
619 if (GET_CODE (op
) == MINUS
620 && !HONOR_SIGNED_ZEROS (mode
)
621 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
622 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
624 if (GET_CODE (op
) == PLUS
625 && !HONOR_SIGNED_ZEROS (mode
)
626 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
628 /* (neg (plus A C)) is simplified to (minus -C A). */
629 if (CONST_INT_P (XEXP (op
, 1))
630 || GET_CODE (XEXP (op
, 1)) == CONST_DOUBLE
)
632 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
634 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
637 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
638 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
639 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
642 /* (neg (mult A B)) becomes (mult (neg A) B).
643 This works even for floating-point values. */
644 if (GET_CODE (op
) == MULT
645 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
647 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
648 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op
, 1));
651 /* NEG commutes with ASHIFT since it is multiplication. Only do
652 this if we can then eliminate the NEG (e.g., if the operand
654 if (GET_CODE (op
) == ASHIFT
)
656 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
658 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
661 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
662 C is equal to the width of MODE minus 1. */
663 if (GET_CODE (op
) == ASHIFTRT
664 && CONST_INT_P (XEXP (op
, 1))
665 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
666 return simplify_gen_binary (LSHIFTRT
, mode
,
667 XEXP (op
, 0), XEXP (op
, 1));
669 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
670 C is equal to the width of MODE minus 1. */
671 if (GET_CODE (op
) == LSHIFTRT
672 && CONST_INT_P (XEXP (op
, 1))
673 && INTVAL (XEXP (op
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
674 return simplify_gen_binary (ASHIFTRT
, mode
,
675 XEXP (op
, 0), XEXP (op
, 1));
677 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
678 if (GET_CODE (op
) == XOR
679 && XEXP (op
, 1) == const1_rtx
680 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
681 return plus_constant (XEXP (op
, 0), -1);
683 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
684 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
685 if (GET_CODE (op
) == LT
686 && XEXP (op
, 1) == const0_rtx
687 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
689 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
690 int isize
= GET_MODE_BITSIZE (inner
);
691 if (STORE_FLAG_VALUE
== 1)
693 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
694 GEN_INT (isize
- 1));
697 if (GET_MODE_BITSIZE (mode
) > isize
)
698 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
699 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
701 else if (STORE_FLAG_VALUE
== -1)
703 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
704 GEN_INT (isize
- 1));
707 if (GET_MODE_BITSIZE (mode
) > isize
)
708 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
709 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
715 /* We can't handle truncation to a partial integer mode here
716 because we don't know the real bitsize of the partial
718 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
721 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
722 if ((GET_CODE (op
) == SIGN_EXTEND
723 || GET_CODE (op
) == ZERO_EXTEND
)
724 && GET_MODE (XEXP (op
, 0)) == mode
)
727 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
728 (OP:SI foo:SI) if OP is NEG or ABS. */
729 if ((GET_CODE (op
) == ABS
730 || GET_CODE (op
) == NEG
)
731 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
732 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
733 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
734 return simplify_gen_unary (GET_CODE (op
), mode
,
735 XEXP (XEXP (op
, 0), 0), mode
);
737 /* (truncate:A (subreg:B (truncate:C X) 0)) is
739 if (GET_CODE (op
) == SUBREG
740 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
741 && subreg_lowpart_p (op
))
742 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (SUBREG_REG (op
), 0),
743 GET_MODE (XEXP (SUBREG_REG (op
), 0)));
745 /* If we know that the value is already truncated, we can
746 replace the TRUNCATE with a SUBREG. Note that this is also
747 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
748 modes we just have to apply a different definition for
749 truncation. But don't do this for an (LSHIFTRT (MULT ...))
750 since this will cause problems with the umulXi3_highpart
752 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode
),
753 GET_MODE_BITSIZE (GET_MODE (op
)))
754 ? (num_sign_bit_copies (op
, GET_MODE (op
))
755 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op
))
756 - GET_MODE_BITSIZE (mode
)))
757 : truncated_to_mode (mode
, op
))
758 && ! (GET_CODE (op
) == LSHIFTRT
759 && GET_CODE (XEXP (op
, 0)) == MULT
))
760 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
762 /* A truncate of a comparison can be replaced with a subreg if
763 STORE_FLAG_VALUE permits. This is like the previous test,
764 but it works even if the comparison is done in a mode larger
765 than HOST_BITS_PER_WIDE_INT. */
766 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
768 && ((HOST_WIDE_INT
) STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
769 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
773 if (DECIMAL_FLOAT_MODE_P (mode
))
776 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
777 if (GET_CODE (op
) == FLOAT_EXTEND
778 && GET_MODE (XEXP (op
, 0)) == mode
)
781 /* (float_truncate:SF (float_truncate:DF foo:XF))
782 = (float_truncate:SF foo:XF).
783 This may eliminate double rounding, so it is unsafe.
785 (float_truncate:SF (float_extend:XF foo:DF))
786 = (float_truncate:SF foo:DF).
788 (float_truncate:DF (float_extend:XF foo:SF))
789 = (float_extend:SF foo:DF). */
790 if ((GET_CODE (op
) == FLOAT_TRUNCATE
791 && flag_unsafe_math_optimizations
)
792 || GET_CODE (op
) == FLOAT_EXTEND
)
793 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
795 > GET_MODE_SIZE (mode
)
796 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
800 /* (float_truncate (float x)) is (float x) */
801 if (GET_CODE (op
) == FLOAT
802 && (flag_unsafe_math_optimizations
803 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
804 && ((unsigned)significand_size (GET_MODE (op
))
805 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0)))
806 - num_sign_bit_copies (XEXP (op
, 0),
807 GET_MODE (XEXP (op
, 0))))))))
808 return simplify_gen_unary (FLOAT
, mode
,
810 GET_MODE (XEXP (op
, 0)));
812 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
813 (OP:SF foo:SF) if OP is NEG or ABS. */
814 if ((GET_CODE (op
) == ABS
815 || GET_CODE (op
) == NEG
)
816 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
817 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
818 return simplify_gen_unary (GET_CODE (op
), mode
,
819 XEXP (XEXP (op
, 0), 0), mode
);
821 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
822 is (float_truncate:SF x). */
823 if (GET_CODE (op
) == SUBREG
824 && subreg_lowpart_p (op
)
825 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
826 return SUBREG_REG (op
);
830 if (DECIMAL_FLOAT_MODE_P (mode
))
833 /* (float_extend (float_extend x)) is (float_extend x)
835 (float_extend (float x)) is (float x) assuming that double
836 rounding can't happen.
838 if (GET_CODE (op
) == FLOAT_EXTEND
839 || (GET_CODE (op
) == FLOAT
840 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
841 && ((unsigned)significand_size (GET_MODE (op
))
842 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0)))
843 - num_sign_bit_copies (XEXP (op
, 0),
844 GET_MODE (XEXP (op
, 0)))))))
845 return simplify_gen_unary (GET_CODE (op
), mode
,
847 GET_MODE (XEXP (op
, 0)));
852 /* (abs (neg <foo>)) -> (abs <foo>) */
853 if (GET_CODE (op
) == NEG
)
854 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
855 GET_MODE (XEXP (op
, 0)));
857 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
859 if (GET_MODE (op
) == VOIDmode
)
862 /* If operand is something known to be positive, ignore the ABS. */
863 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
864 || ((GET_MODE_BITSIZE (GET_MODE (op
))
865 <= HOST_BITS_PER_WIDE_INT
)
866 && ((nonzero_bits (op
, GET_MODE (op
))
868 << (GET_MODE_BITSIZE (GET_MODE (op
)) - 1)))
872 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
873 if (num_sign_bit_copies (op
, mode
) == GET_MODE_BITSIZE (mode
))
874 return gen_rtx_NEG (mode
, op
);
879 /* (ffs (*_extend <X>)) = (ffs <X>) */
880 if (GET_CODE (op
) == SIGN_EXTEND
881 || GET_CODE (op
) == ZERO_EXTEND
)
882 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
883 GET_MODE (XEXP (op
, 0)));
887 switch (GET_CODE (op
))
891 /* (popcount (zero_extend <X>)) = (popcount <X>) */
892 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
893 GET_MODE (XEXP (op
, 0)));
897 /* Rotations don't affect popcount. */
898 if (!side_effects_p (XEXP (op
, 1)))
899 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
900 GET_MODE (XEXP (op
, 0)));
909 switch (GET_CODE (op
))
915 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
916 GET_MODE (XEXP (op
, 0)));
920 /* Rotations don't affect parity. */
921 if (!side_effects_p (XEXP (op
, 1)))
922 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
923 GET_MODE (XEXP (op
, 0)));
932 /* (bswap (bswap x)) -> x. */
933 if (GET_CODE (op
) == BSWAP
)
938 /* (float (sign_extend <X>)) = (float <X>). */
939 if (GET_CODE (op
) == SIGN_EXTEND
)
940 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
941 GET_MODE (XEXP (op
, 0)));
945 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
946 becomes just the MINUS if its mode is MODE. This allows
947 folding switch statements on machines using casesi (such as
949 if (GET_CODE (op
) == TRUNCATE
950 && GET_MODE (XEXP (op
, 0)) == mode
951 && GET_CODE (XEXP (op
, 0)) == MINUS
952 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
953 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
956 /* Check for a sign extension of a subreg of a promoted
957 variable, where the promotion is sign-extended, and the
958 target mode is the same as the variable's promotion. */
959 if (GET_CODE (op
) == SUBREG
960 && SUBREG_PROMOTED_VAR_P (op
)
961 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
962 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
963 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
965 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
966 if (! POINTERS_EXTEND_UNSIGNED
967 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
969 || (GET_CODE (op
) == SUBREG
970 && REG_P (SUBREG_REG (op
))
971 && REG_POINTER (SUBREG_REG (op
))
972 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
973 return convert_memory_address (Pmode
, op
);
978 /* Check for a zero extension of a subreg of a promoted
979 variable, where the promotion is zero-extended, and the
980 target mode is the same as the variable's promotion. */
981 if (GET_CODE (op
) == SUBREG
982 && SUBREG_PROMOTED_VAR_P (op
)
983 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
984 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
985 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
987 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
988 if (POINTERS_EXTEND_UNSIGNED
> 0
989 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
991 || (GET_CODE (op
) == SUBREG
992 && REG_P (SUBREG_REG (op
))
993 && REG_POINTER (SUBREG_REG (op
))
994 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
995 return convert_memory_address (Pmode
, op
);
1006 /* Try to compute the value of a unary operation CODE whose output mode is to
1007 be MODE with input operand OP whose mode was originally OP_MODE.
1008 Return zero if the value cannot be computed. */
1010 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
1011 rtx op
, enum machine_mode op_mode
)
1013 unsigned int width
= GET_MODE_BITSIZE (mode
);
1015 if (code
== VEC_DUPLICATE
)
1017 gcc_assert (VECTOR_MODE_P (mode
));
1018 if (GET_MODE (op
) != VOIDmode
)
1020 if (!VECTOR_MODE_P (GET_MODE (op
)))
1021 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1023 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1026 if (CONST_INT_P (op
) || GET_CODE (op
) == CONST_DOUBLE
1027 || GET_CODE (op
) == CONST_VECTOR
)
1029 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1030 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1031 rtvec v
= rtvec_alloc (n_elts
);
1034 if (GET_CODE (op
) != CONST_VECTOR
)
1035 for (i
= 0; i
< n_elts
; i
++)
1036 RTVEC_ELT (v
, i
) = op
;
1039 enum machine_mode inmode
= GET_MODE (op
);
1040 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1041 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1043 gcc_assert (in_n_elts
< n_elts
);
1044 gcc_assert ((n_elts
% in_n_elts
) == 0);
1045 for (i
= 0; i
< n_elts
; i
++)
1046 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1048 return gen_rtx_CONST_VECTOR (mode
, v
);
1052 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1054 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1055 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1056 enum machine_mode opmode
= GET_MODE (op
);
1057 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1058 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1059 rtvec v
= rtvec_alloc (n_elts
);
1062 gcc_assert (op_n_elts
== n_elts
);
1063 for (i
= 0; i
< n_elts
; i
++)
1065 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1066 CONST_VECTOR_ELT (op
, i
),
1067 GET_MODE_INNER (opmode
));
1070 RTVEC_ELT (v
, i
) = x
;
1072 return gen_rtx_CONST_VECTOR (mode
, v
);
1075 /* The order of these tests is critical so that, for example, we don't
1076 check the wrong mode (input vs. output) for a conversion operation,
1077 such as FIX. At some point, this should be simplified. */
1079 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
1080 && (GET_CODE (op
) == CONST_DOUBLE
|| CONST_INT_P (op
)))
1082 HOST_WIDE_INT hv
, lv
;
1085 if (CONST_INT_P (op
))
1086 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1088 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1090 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
1091 d
= real_value_truncate (mode
, d
);
1092 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1094 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
1095 && (GET_CODE (op
) == CONST_DOUBLE
1096 || CONST_INT_P (op
)))
1098 HOST_WIDE_INT hv
, lv
;
1101 if (CONST_INT_P (op
))
1102 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1104 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1106 if (op_mode
== VOIDmode
)
1108 /* We don't know how to interpret negative-looking numbers in
1109 this case, so don't try to fold those. */
1113 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
1116 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1118 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1119 d
= real_value_truncate (mode
, d
);
1120 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1123 if (CONST_INT_P (op
)
1124 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1126 HOST_WIDE_INT arg0
= INTVAL (op
);
1140 val
= (arg0
>= 0 ? arg0
: - arg0
);
1144 /* Don't use ffs here. Instead, get low order bit and then its
1145 number. If arg0 is zero, this will return 0, as desired. */
1146 arg0
&= GET_MODE_MASK (mode
);
1147 val
= exact_log2 (arg0
& (- arg0
)) + 1;
1151 arg0
&= GET_MODE_MASK (mode
);
1152 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1155 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
1159 arg0
&= GET_MODE_MASK (mode
);
1162 /* Even if the value at zero is undefined, we have to come
1163 up with some replacement. Seems good enough. */
1164 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1165 val
= GET_MODE_BITSIZE (mode
);
1168 val
= exact_log2 (arg0
& -arg0
);
1172 arg0
&= GET_MODE_MASK (mode
);
1175 val
++, arg0
&= arg0
- 1;
1179 arg0
&= GET_MODE_MASK (mode
);
1182 val
++, arg0
&= arg0
- 1;
1191 for (s
= 0; s
< width
; s
+= 8)
1193 unsigned int d
= width
- s
- 8;
1194 unsigned HOST_WIDE_INT byte
;
1195 byte
= (arg0
>> s
) & 0xff;
1206 /* When zero-extending a CONST_INT, we need to know its
1208 gcc_assert (op_mode
!= VOIDmode
);
1209 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1211 /* If we were really extending the mode,
1212 we would have to distinguish between zero-extension
1213 and sign-extension. */
1214 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1217 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1218 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1224 if (op_mode
== VOIDmode
)
1226 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
1228 /* If we were really extending the mode,
1229 we would have to distinguish between zero-extension
1230 and sign-extension. */
1231 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
1234 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1237 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
1239 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
1240 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1248 case FLOAT_TRUNCATE
:
1259 return gen_int_mode (val
, mode
);
1262 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1263 for a DImode operation on a CONST_INT. */
1264 else if (GET_MODE (op
) == VOIDmode
1265 && width
<= HOST_BITS_PER_WIDE_INT
* 2
1266 && (GET_CODE (op
) == CONST_DOUBLE
1267 || CONST_INT_P (op
)))
1269 unsigned HOST_WIDE_INT l1
, lv
;
1270 HOST_WIDE_INT h1
, hv
;
1272 if (GET_CODE (op
) == CONST_DOUBLE
)
1273 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
1275 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
1285 neg_double (l1
, h1
, &lv
, &hv
);
1290 neg_double (l1
, h1
, &lv
, &hv
);
1302 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
1305 lv
= exact_log2 (l1
& -l1
) + 1;
1311 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
1312 - HOST_BITS_PER_WIDE_INT
;
1314 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
1315 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1316 lv
= GET_MODE_BITSIZE (mode
);
1322 lv
= exact_log2 (l1
& -l1
);
1324 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
1325 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
1326 lv
= GET_MODE_BITSIZE (mode
);
1354 for (s
= 0; s
< width
; s
+= 8)
1356 unsigned int d
= width
- s
- 8;
1357 unsigned HOST_WIDE_INT byte
;
1359 if (s
< HOST_BITS_PER_WIDE_INT
)
1360 byte
= (l1
>> s
) & 0xff;
1362 byte
= (h1
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1364 if (d
< HOST_BITS_PER_WIDE_INT
)
1367 hv
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1373 /* This is just a change-of-mode, so do nothing. */
1378 gcc_assert (op_mode
!= VOIDmode
);
1380 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1384 lv
= l1
& GET_MODE_MASK (op_mode
);
1388 if (op_mode
== VOIDmode
1389 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
1393 lv
= l1
& GET_MODE_MASK (op_mode
);
1394 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
1395 && (lv
& ((HOST_WIDE_INT
) 1
1396 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
1397 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
1399 hv
= HWI_SIGN_EXTEND (lv
);
1410 return immed_double_const (lv
, hv
, mode
);
1413 else if (GET_CODE (op
) == CONST_DOUBLE
1414 && SCALAR_FLOAT_MODE_P (mode
))
1416 REAL_VALUE_TYPE d
, t
;
1417 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1422 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1424 real_sqrt (&t
, mode
, &d
);
1428 d
= REAL_VALUE_ABS (d
);
1431 d
= REAL_VALUE_NEGATE (d
);
1433 case FLOAT_TRUNCATE
:
1434 d
= real_value_truncate (mode
, d
);
1437 /* All this does is change the mode. */
1440 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1447 real_to_target (tmp
, &d
, GET_MODE (op
));
1448 for (i
= 0; i
< 4; i
++)
1450 real_from_target (&d
, tmp
, mode
);
1456 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1459 else if (GET_CODE (op
) == CONST_DOUBLE
1460 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1461 && GET_MODE_CLASS (mode
) == MODE_INT
1462 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1464 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1465 operators are intentionally left unspecified (to ease implementation
1466 by target backends), for consistency, this routine implements the
1467 same semantics for constant folding as used by the middle-end. */
1469 /* This was formerly used only for non-IEEE float.
1470 eggert@twinsun.com says it is safe for IEEE also. */
1471 HOST_WIDE_INT xh
, xl
, th
, tl
;
1472 REAL_VALUE_TYPE x
, t
;
1473 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1477 if (REAL_VALUE_ISNAN (x
))
1480 /* Test against the signed upper bound. */
1481 if (width
> HOST_BITS_PER_WIDE_INT
)
1483 th
= ((unsigned HOST_WIDE_INT
) 1
1484 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1490 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1492 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1493 if (REAL_VALUES_LESS (t
, x
))
1500 /* Test against the signed lower bound. */
1501 if (width
> HOST_BITS_PER_WIDE_INT
)
1503 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1509 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
1511 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1512 if (REAL_VALUES_LESS (x
, t
))
1518 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1522 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1525 /* Test against the unsigned upper bound. */
1526 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1531 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1533 th
= ((unsigned HOST_WIDE_INT
) 1
1534 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1540 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1542 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1543 if (REAL_VALUES_LESS (t
, x
))
1550 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1556 return immed_double_const (xl
, xh
, mode
);
1562 /* Subroutine of simplify_binary_operation to simplify a commutative,
1563 associative binary operation CODE with result mode MODE, operating
1564 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1565 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1566 canonicalization is possible. */
1569 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1574 /* Linearize the operator to the left. */
1575 if (GET_CODE (op1
) == code
)
1577 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1578 if (GET_CODE (op0
) == code
)
1580 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1581 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1584 /* "a op (b op c)" becomes "(b op c) op a". */
1585 if (! swap_commutative_operands_p (op1
, op0
))
1586 return simplify_gen_binary (code
, mode
, op1
, op0
);
1593 if (GET_CODE (op0
) == code
)
1595 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1596 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
1598 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1599 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1602 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1603 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1605 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1607 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1608 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
1610 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1617 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1618 and OP1. Return 0 if no simplification is possible.
1620 Don't use this for relational operations such as EQ or LT.
1621 Use simplify_relational_operation instead. */
1623 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1626 rtx trueop0
, trueop1
;
1629 /* Relational operations don't work here. We must know the mode
1630 of the operands in order to do the comparison correctly.
1631 Assuming a full word can give incorrect results.
1632 Consider comparing 128 with -128 in QImode. */
1633 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
1634 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
1636 /* Make sure the constant is second. */
1637 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
1638 && swap_commutative_operands_p (op0
, op1
))
1640 tem
= op0
, op0
= op1
, op1
= tem
;
1643 trueop0
= avoid_constant_pool_reference (op0
);
1644 trueop1
= avoid_constant_pool_reference (op1
);
1646 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
1649 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
1652 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1653 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1654 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1655 actual constants. */
1658 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
1659 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
1661 rtx tem
, reversed
, opleft
, opright
;
1663 unsigned int width
= GET_MODE_BITSIZE (mode
);
1665 /* Even if we can't compute a constant result,
1666 there are some cases worth simplifying. */
1671 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1672 when x is NaN, infinite, or finite and nonzero. They aren't
1673 when x is -0 and the rounding mode is not towards -infinity,
1674 since (-0) + 0 is then 0. */
1675 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1678 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1679 transformations are safe even for IEEE. */
1680 if (GET_CODE (op0
) == NEG
)
1681 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1682 else if (GET_CODE (op1
) == NEG
)
1683 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1685 /* (~a) + 1 -> -a */
1686 if (INTEGRAL_MODE_P (mode
)
1687 && GET_CODE (op0
) == NOT
1688 && trueop1
== const1_rtx
)
1689 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1691 /* Handle both-operands-constant cases. We can only add
1692 CONST_INTs to constants since the sum of relocatable symbols
1693 can't be handled by most assemblers. Don't add CONST_INT
1694 to CONST_INT since overflow won't be computed properly if wider
1695 than HOST_BITS_PER_WIDE_INT. */
1697 if ((GET_CODE (op0
) == CONST
1698 || GET_CODE (op0
) == SYMBOL_REF
1699 || GET_CODE (op0
) == LABEL_REF
)
1700 && CONST_INT_P (op1
))
1701 return plus_constant (op0
, INTVAL (op1
));
1702 else if ((GET_CODE (op1
) == CONST
1703 || GET_CODE (op1
) == SYMBOL_REF
1704 || GET_CODE (op1
) == LABEL_REF
)
1705 && CONST_INT_P (op0
))
1706 return plus_constant (op1
, INTVAL (op0
));
1708 /* See if this is something like X * C - X or vice versa or
1709 if the multiplication is written as a shift. If so, we can
1710 distribute and make a new multiply, shift, or maybe just
1711 have X (if C is 2 in the example above). But don't make
1712 something more expensive than we had before. */
1714 if (SCALAR_INT_MODE_P (mode
))
1716 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1717 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1718 rtx lhs
= op0
, rhs
= op1
;
1720 if (GET_CODE (lhs
) == NEG
)
1724 lhs
= XEXP (lhs
, 0);
1726 else if (GET_CODE (lhs
) == MULT
1727 && CONST_INT_P (XEXP (lhs
, 1)))
1729 coeff0l
= INTVAL (XEXP (lhs
, 1));
1730 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1731 lhs
= XEXP (lhs
, 0);
1733 else if (GET_CODE (lhs
) == ASHIFT
1734 && CONST_INT_P (XEXP (lhs
, 1))
1735 && INTVAL (XEXP (lhs
, 1)) >= 0
1736 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1738 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1740 lhs
= XEXP (lhs
, 0);
1743 if (GET_CODE (rhs
) == NEG
)
1747 rhs
= XEXP (rhs
, 0);
1749 else if (GET_CODE (rhs
) == MULT
1750 && CONST_INT_P (XEXP (rhs
, 1)))
1752 coeff1l
= INTVAL (XEXP (rhs
, 1));
1753 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1754 rhs
= XEXP (rhs
, 0);
1756 else if (GET_CODE (rhs
) == ASHIFT
1757 && CONST_INT_P (XEXP (rhs
, 1))
1758 && INTVAL (XEXP (rhs
, 1)) >= 0
1759 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1761 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1763 rhs
= XEXP (rhs
, 0);
1766 if (rtx_equal_p (lhs
, rhs
))
1768 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1770 unsigned HOST_WIDE_INT l
;
1772 bool speed
= optimize_function_for_speed_p (cfun
);
1774 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1775 coeff
= immed_double_const (l
, h
, mode
);
1777 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1778 return rtx_cost (tem
, SET
, speed
) <= rtx_cost (orig
, SET
, speed
)
1783 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1784 if ((CONST_INT_P (op1
)
1785 || GET_CODE (op1
) == CONST_DOUBLE
)
1786 && GET_CODE (op0
) == XOR
1787 && (CONST_INT_P (XEXP (op0
, 1))
1788 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1789 && mode_signbit_p (mode
, op1
))
1790 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1791 simplify_gen_binary (XOR
, mode
, op1
,
1794 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1795 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
1796 && GET_CODE (op0
) == MULT
1797 && GET_CODE (XEXP (op0
, 0)) == NEG
)
1801 in1
= XEXP (XEXP (op0
, 0), 0);
1802 in2
= XEXP (op0
, 1);
1803 return simplify_gen_binary (MINUS
, mode
, op1
,
1804 simplify_gen_binary (MULT
, mode
,
1808 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1809 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1811 if (COMPARISON_P (op0
)
1812 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
1813 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
1814 && (reversed
= reversed_comparison (op0
, mode
)))
1816 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
1818 /* If one of the operands is a PLUS or a MINUS, see if we can
1819 simplify this by the associative law.
1820 Don't use the associative law for floating point.
1821 The inaccuracy makes it nonassociative,
1822 and subtle programs can break if operations are associated. */
1824 if (INTEGRAL_MODE_P (mode
)
1825 && (plus_minus_operand_p (op0
)
1826 || plus_minus_operand_p (op1
))
1827 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1830 /* Reassociate floating point addition only when the user
1831 specifies associative math operations. */
1832 if (FLOAT_MODE_P (mode
)
1833 && flag_associative_math
)
1835 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1842 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1843 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1844 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1845 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1847 rtx xop00
= XEXP (op0
, 0);
1848 rtx xop10
= XEXP (op1
, 0);
1851 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1853 if (REG_P (xop00
) && REG_P (xop10
)
1854 && GET_MODE (xop00
) == GET_MODE (xop10
)
1855 && REGNO (xop00
) == REGNO (xop10
)
1856 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1857 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1864 /* We can't assume x-x is 0 even with non-IEEE floating point,
1865 but since it is zero except in very strange circumstances, we
1866 will treat it as zero with -ffinite-math-only. */
1867 if (rtx_equal_p (trueop0
, trueop1
)
1868 && ! side_effects_p (op0
)
1869 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
1870 return CONST0_RTX (mode
);
1872 /* Change subtraction from zero into negation. (0 - x) is the
1873 same as -x when x is NaN, infinite, or finite and nonzero.
1874 But if the mode has signed zeros, and does not round towards
1875 -infinity, then 0 - 0 is 0, not -0. */
1876 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1877 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1879 /* (-1 - a) is ~a. */
1880 if (trueop0
== constm1_rtx
)
1881 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1883 /* Subtracting 0 has no effect unless the mode has signed zeros
1884 and supports rounding towards -infinity. In such a case,
1886 if (!(HONOR_SIGNED_ZEROS (mode
)
1887 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1888 && trueop1
== CONST0_RTX (mode
))
1891 /* See if this is something like X * C - X or vice versa or
1892 if the multiplication is written as a shift. If so, we can
1893 distribute and make a new multiply, shift, or maybe just
1894 have X (if C is 2 in the example above). But don't make
1895 something more expensive than we had before. */
1897 if (SCALAR_INT_MODE_P (mode
))
1899 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1900 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1901 rtx lhs
= op0
, rhs
= op1
;
1903 if (GET_CODE (lhs
) == NEG
)
1907 lhs
= XEXP (lhs
, 0);
1909 else if (GET_CODE (lhs
) == MULT
1910 && CONST_INT_P (XEXP (lhs
, 1)))
1912 coeff0l
= INTVAL (XEXP (lhs
, 1));
1913 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1914 lhs
= XEXP (lhs
, 0);
1916 else if (GET_CODE (lhs
) == ASHIFT
1917 && CONST_INT_P (XEXP (lhs
, 1))
1918 && INTVAL (XEXP (lhs
, 1)) >= 0
1919 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1921 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1923 lhs
= XEXP (lhs
, 0);
1926 if (GET_CODE (rhs
) == NEG
)
1930 rhs
= XEXP (rhs
, 0);
1932 else if (GET_CODE (rhs
) == MULT
1933 && CONST_INT_P (XEXP (rhs
, 1)))
1935 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1936 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1937 rhs
= XEXP (rhs
, 0);
1939 else if (GET_CODE (rhs
) == ASHIFT
1940 && CONST_INT_P (XEXP (rhs
, 1))
1941 && INTVAL (XEXP (rhs
, 1)) >= 0
1942 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1944 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1946 rhs
= XEXP (rhs
, 0);
1949 if (rtx_equal_p (lhs
, rhs
))
1951 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1953 unsigned HOST_WIDE_INT l
;
1955 bool speed
= optimize_function_for_speed_p (cfun
);
1957 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1958 coeff
= immed_double_const (l
, h
, mode
);
1960 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1961 return rtx_cost (tem
, SET
, speed
) <= rtx_cost (orig
, SET
, speed
)
1966 /* (a - (-b)) -> (a + b). True even for IEEE. */
1967 if (GET_CODE (op1
) == NEG
)
1968 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1970 /* (-x - c) may be simplified as (-c - x). */
1971 if (GET_CODE (op0
) == NEG
1972 && (CONST_INT_P (op1
)
1973 || GET_CODE (op1
) == CONST_DOUBLE
))
1975 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1977 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1980 /* Don't let a relocatable value get a negative coeff. */
1981 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
1982 return simplify_gen_binary (PLUS
, mode
,
1984 neg_const_int (mode
, op1
));
1986 /* (x - (x & y)) -> (x & ~y) */
1987 if (GET_CODE (op1
) == AND
)
1989 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1991 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1992 GET_MODE (XEXP (op1
, 1)));
1993 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1995 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1997 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1998 GET_MODE (XEXP (op1
, 0)));
1999 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2003 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2004 by reversing the comparison code if valid. */
2005 if (STORE_FLAG_VALUE
== 1
2006 && trueop0
== const1_rtx
2007 && COMPARISON_P (op1
)
2008 && (reversed
= reversed_comparison (op1
, mode
)))
2011 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2012 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2013 && GET_CODE (op1
) == MULT
2014 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2018 in1
= XEXP (XEXP (op1
, 0), 0);
2019 in2
= XEXP (op1
, 1);
2020 return simplify_gen_binary (PLUS
, mode
,
2021 simplify_gen_binary (MULT
, mode
,
2026 /* Canonicalize (minus (neg A) (mult B C)) to
2027 (minus (mult (neg B) C) A). */
2028 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2029 && GET_CODE (op1
) == MULT
2030 && GET_CODE (op0
) == NEG
)
2034 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2035 in2
= XEXP (op1
, 1);
2036 return simplify_gen_binary (MINUS
, mode
,
2037 simplify_gen_binary (MULT
, mode
,
2042 /* If one of the operands is a PLUS or a MINUS, see if we can
2043 simplify this by the associative law. This will, for example,
2044 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2045 Don't use the associative law for floating point.
2046 The inaccuracy makes it nonassociative,
2047 and subtle programs can break if operations are associated. */
2049 if (INTEGRAL_MODE_P (mode
)
2050 && (plus_minus_operand_p (op0
)
2051 || plus_minus_operand_p (op1
))
2052 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2057 if (trueop1
== constm1_rtx
)
2058 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2060 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2061 x is NaN, since x * 0 is then also NaN. Nor is it valid
2062 when the mode has signed zeros, since multiplying a negative
2063 number by 0 will give -0, not 0. */
2064 if (!HONOR_NANS (mode
)
2065 && !HONOR_SIGNED_ZEROS (mode
)
2066 && trueop1
== CONST0_RTX (mode
)
2067 && ! side_effects_p (op0
))
2070 /* In IEEE floating point, x*1 is not equivalent to x for
2072 if (!HONOR_SNANS (mode
)
2073 && trueop1
== CONST1_RTX (mode
))
2076 /* Convert multiply by constant power of two into shift unless
2077 we are still generating RTL. This test is a kludge. */
2078 if (CONST_INT_P (trueop1
)
2079 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
2080 /* If the mode is larger than the host word size, and the
2081 uppermost bit is set, then this isn't a power of two due
2082 to implicit sign extension. */
2083 && (width
<= HOST_BITS_PER_WIDE_INT
2084 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
2085 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2087 /* Likewise for multipliers wider than a word. */
2088 if (GET_CODE (trueop1
) == CONST_DOUBLE
2089 && (GET_MODE (trueop1
) == VOIDmode
2090 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
2091 && GET_MODE (op0
) == mode
2092 && CONST_DOUBLE_LOW (trueop1
) == 0
2093 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
2094 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2095 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2097 /* x*2 is x+x and x*(-1) is -x */
2098 if (GET_CODE (trueop1
) == CONST_DOUBLE
2099 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2100 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2101 && GET_MODE (op0
) == mode
)
2104 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2106 if (REAL_VALUES_EQUAL (d
, dconst2
))
2107 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2109 if (!HONOR_SNANS (mode
)
2110 && REAL_VALUES_EQUAL (d
, dconstm1
))
2111 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2114 /* Optimize -x * -x as x * x. */
2115 if (FLOAT_MODE_P (mode
)
2116 && GET_CODE (op0
) == NEG
2117 && GET_CODE (op1
) == NEG
2118 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2119 && !side_effects_p (XEXP (op0
, 0)))
2120 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2122 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2123 if (SCALAR_FLOAT_MODE_P (mode
)
2124 && GET_CODE (op0
) == ABS
2125 && GET_CODE (op1
) == ABS
2126 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2127 && !side_effects_p (XEXP (op0
, 0)))
2128 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2130 /* Reassociate multiplication, but for floating point MULTs
2131 only when the user specifies unsafe math optimizations. */
2132 if (! FLOAT_MODE_P (mode
)
2133 || flag_unsafe_math_optimizations
)
2135 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2142 if (trueop1
== const0_rtx
)
2144 if (CONST_INT_P (trueop1
)
2145 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2146 == GET_MODE_MASK (mode
)))
2148 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2150 /* A | (~A) -> -1 */
2151 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2152 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2153 && ! side_effects_p (op0
)
2154 && SCALAR_INT_MODE_P (mode
))
2157 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2158 if (CONST_INT_P (op1
)
2159 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2160 && (nonzero_bits (op0
, mode
) & ~INTVAL (op1
)) == 0)
2163 /* Canonicalize (X & C1) | C2. */
2164 if (GET_CODE (op0
) == AND
2165 && CONST_INT_P (trueop1
)
2166 && CONST_INT_P (XEXP (op0
, 1)))
2168 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2169 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2170 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2172 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2174 && !side_effects_p (XEXP (op0
, 0)))
2177 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2178 if (((c1
|c2
) & mask
) == mask
)
2179 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2181 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2182 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2184 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2185 gen_int_mode (c1
& ~c2
, mode
));
2186 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2190 /* Convert (A & B) | A to A. */
2191 if (GET_CODE (op0
) == AND
2192 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2193 || rtx_equal_p (XEXP (op0
, 1), op1
))
2194 && ! side_effects_p (XEXP (op0
, 0))
2195 && ! side_effects_p (XEXP (op0
, 1)))
2198 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2199 mode size to (rotate A CX). */
2201 if (GET_CODE (op1
) == ASHIFT
2202 || GET_CODE (op1
) == SUBREG
)
2213 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2214 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2215 && CONST_INT_P (XEXP (opleft
, 1))
2216 && CONST_INT_P (XEXP (opright
, 1))
2217 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2218 == GET_MODE_BITSIZE (mode
)))
2219 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2221 /* Same, but for ashift that has been "simplified" to a wider mode
2222 by simplify_shift_const. */
2224 if (GET_CODE (opleft
) == SUBREG
2225 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2226 && GET_CODE (opright
) == LSHIFTRT
2227 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2228 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2229 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2230 && (GET_MODE_SIZE (GET_MODE (opleft
))
2231 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2232 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2233 SUBREG_REG (XEXP (opright
, 0)))
2234 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2235 && CONST_INT_P (XEXP (opright
, 1))
2236 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2237 == GET_MODE_BITSIZE (mode
)))
2238 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2239 XEXP (SUBREG_REG (opleft
), 1));
2241 /* If we have (ior (and (X C1) C2)), simplify this by making
2242 C1 as small as possible if C1 actually changes. */
2243 if (CONST_INT_P (op1
)
2244 && (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2245 || INTVAL (op1
) > 0)
2246 && GET_CODE (op0
) == AND
2247 && CONST_INT_P (XEXP (op0
, 1))
2248 && CONST_INT_P (op1
)
2249 && (INTVAL (XEXP (op0
, 1)) & INTVAL (op1
)) != 0)
2250 return simplify_gen_binary (IOR
, mode
,
2252 (AND
, mode
, XEXP (op0
, 0),
2253 GEN_INT (INTVAL (XEXP (op0
, 1))
2257 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2258 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2259 the PLUS does not affect any of the bits in OP1: then we can do
2260 the IOR as a PLUS and we can associate. This is valid if OP1
2261 can be safely shifted left C bits. */
2262 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2263 && GET_CODE (XEXP (op0
, 0)) == PLUS
2264 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2265 && CONST_INT_P (XEXP (op0
, 1))
2266 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2268 int count
= INTVAL (XEXP (op0
, 1));
2269 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2271 if (mask
>> count
== INTVAL (trueop1
)
2272 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2273 return simplify_gen_binary (ASHIFTRT
, mode
,
2274 plus_constant (XEXP (op0
, 0), mask
),
2278 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2284 if (trueop1
== const0_rtx
)
2286 if (CONST_INT_P (trueop1
)
2287 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2288 == GET_MODE_MASK (mode
)))
2289 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2290 if (rtx_equal_p (trueop0
, trueop1
)
2291 && ! side_effects_p (op0
)
2292 && GET_MODE_CLASS (mode
) != MODE_CC
)
2293 return CONST0_RTX (mode
);
2295 /* Canonicalize XOR of the most significant bit to PLUS. */
2296 if ((CONST_INT_P (op1
)
2297 || GET_CODE (op1
) == CONST_DOUBLE
)
2298 && mode_signbit_p (mode
, op1
))
2299 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2300 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2301 if ((CONST_INT_P (op1
)
2302 || GET_CODE (op1
) == CONST_DOUBLE
)
2303 && GET_CODE (op0
) == PLUS
2304 && (CONST_INT_P (XEXP (op0
, 1))
2305 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
2306 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2307 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2308 simplify_gen_binary (XOR
, mode
, op1
,
2311 /* If we are XORing two things that have no bits in common,
2312 convert them into an IOR. This helps to detect rotation encoded
2313 using those methods and possibly other simplifications. */
2315 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2316 && (nonzero_bits (op0
, mode
)
2317 & nonzero_bits (op1
, mode
)) == 0)
2318 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2320 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2321 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2324 int num_negated
= 0;
2326 if (GET_CODE (op0
) == NOT
)
2327 num_negated
++, op0
= XEXP (op0
, 0);
2328 if (GET_CODE (op1
) == NOT
)
2329 num_negated
++, op1
= XEXP (op1
, 0);
2331 if (num_negated
== 2)
2332 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2333 else if (num_negated
== 1)
2334 return simplify_gen_unary (NOT
, mode
,
2335 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2339 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2340 correspond to a machine insn or result in further simplifications
2341 if B is a constant. */
2343 if (GET_CODE (op0
) == AND
2344 && rtx_equal_p (XEXP (op0
, 1), op1
)
2345 && ! side_effects_p (op1
))
2346 return simplify_gen_binary (AND
, mode
,
2347 simplify_gen_unary (NOT
, mode
,
2348 XEXP (op0
, 0), mode
),
2351 else if (GET_CODE (op0
) == AND
2352 && rtx_equal_p (XEXP (op0
, 0), op1
)
2353 && ! side_effects_p (op1
))
2354 return simplify_gen_binary (AND
, mode
,
2355 simplify_gen_unary (NOT
, mode
,
2356 XEXP (op0
, 1), mode
),
2359 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2360 comparison if STORE_FLAG_VALUE is 1. */
2361 if (STORE_FLAG_VALUE
== 1
2362 && trueop1
== const1_rtx
2363 && COMPARISON_P (op0
)
2364 && (reversed
= reversed_comparison (op0
, mode
)))
2367 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2368 is (lt foo (const_int 0)), so we can perform the above
2369 simplification if STORE_FLAG_VALUE is 1. */
2371 if (STORE_FLAG_VALUE
== 1
2372 && trueop1
== const1_rtx
2373 && GET_CODE (op0
) == LSHIFTRT
2374 && CONST_INT_P (XEXP (op0
, 1))
2375 && INTVAL (XEXP (op0
, 1)) == GET_MODE_BITSIZE (mode
) - 1)
2376 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2378 /* (xor (comparison foo bar) (const_int sign-bit))
2379 when STORE_FLAG_VALUE is the sign bit. */
2380 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2381 && ((STORE_FLAG_VALUE
& GET_MODE_MASK (mode
))
2382 == (unsigned HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (mode
) - 1))
2383 && trueop1
== const_true_rtx
2384 && COMPARISON_P (op0
)
2385 && (reversed
= reversed_comparison (op0
, mode
)))
2388 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2394 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2396 if (GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
2398 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2399 HOST_WIDE_INT nzop1
;
2400 if (CONST_INT_P (trueop1
))
2402 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2403 /* If we are turning off bits already known off in OP0, we need
2405 if ((nzop0
& ~val1
) == 0)
2408 nzop1
= nonzero_bits (trueop1
, mode
);
2409 /* If we are clearing all the nonzero bits, the result is zero. */
2410 if ((nzop1
& nzop0
) == 0
2411 && !side_effects_p (op0
) && !side_effects_p (op1
))
2412 return CONST0_RTX (mode
);
2414 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2415 && GET_MODE_CLASS (mode
) != MODE_CC
)
2418 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2419 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2420 && ! side_effects_p (op0
)
2421 && GET_MODE_CLASS (mode
) != MODE_CC
)
2422 return CONST0_RTX (mode
);
2424 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2425 there are no nonzero bits of C outside of X's mode. */
2426 if ((GET_CODE (op0
) == SIGN_EXTEND
2427 || GET_CODE (op0
) == ZERO_EXTEND
)
2428 && CONST_INT_P (trueop1
)
2429 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2430 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2431 & INTVAL (trueop1
)) == 0)
2433 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2434 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2435 gen_int_mode (INTVAL (trueop1
),
2437 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2440 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2441 we might be able to further simplify the AND with X and potentially
2442 remove the truncation altogether. */
2443 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2445 rtx x
= XEXP (op0
, 0);
2446 enum machine_mode xmode
= GET_MODE (x
);
2447 tem
= simplify_gen_binary (AND
, xmode
, x
,
2448 gen_int_mode (INTVAL (trueop1
), xmode
));
2449 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2452 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2453 if (GET_CODE (op0
) == IOR
2454 && CONST_INT_P (trueop1
)
2455 && CONST_INT_P (XEXP (op0
, 1)))
2457 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2458 return simplify_gen_binary (IOR
, mode
,
2459 simplify_gen_binary (AND
, mode
,
2460 XEXP (op0
, 0), op1
),
2461 gen_int_mode (tmp
, mode
));
2464 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2465 insn (and may simplify more). */
2466 if (GET_CODE (op0
) == XOR
2467 && rtx_equal_p (XEXP (op0
, 0), op1
)
2468 && ! side_effects_p (op1
))
2469 return simplify_gen_binary (AND
, mode
,
2470 simplify_gen_unary (NOT
, mode
,
2471 XEXP (op0
, 1), mode
),
2474 if (GET_CODE (op0
) == XOR
2475 && rtx_equal_p (XEXP (op0
, 1), op1
)
2476 && ! side_effects_p (op1
))
2477 return simplify_gen_binary (AND
, mode
,
2478 simplify_gen_unary (NOT
, mode
,
2479 XEXP (op0
, 0), mode
),
2482 /* Similarly for (~(A ^ B)) & A. */
2483 if (GET_CODE (op0
) == NOT
2484 && GET_CODE (XEXP (op0
, 0)) == XOR
2485 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
2486 && ! side_effects_p (op1
))
2487 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
2489 if (GET_CODE (op0
) == NOT
2490 && GET_CODE (XEXP (op0
, 0)) == XOR
2491 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
2492 && ! side_effects_p (op1
))
2493 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
2495 /* Convert (A | B) & A to A. */
2496 if (GET_CODE (op0
) == IOR
2497 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2498 || rtx_equal_p (XEXP (op0
, 1), op1
))
2499 && ! side_effects_p (XEXP (op0
, 0))
2500 && ! side_effects_p (XEXP (op0
, 1)))
2503 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2504 ((A & N) + B) & M -> (A + B) & M
2505 Similarly if (N & M) == 0,
2506 ((A | N) + B) & M -> (A + B) & M
2507 and for - instead of + and/or ^ instead of |.
2508 Also, if (N & M) == 0, then
2509 (A +- N) & M -> A & M. */
2510 if (CONST_INT_P (trueop1
)
2511 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
2512 && ~INTVAL (trueop1
)
2513 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
2514 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
2519 pmop
[0] = XEXP (op0
, 0);
2520 pmop
[1] = XEXP (op0
, 1);
2522 if (CONST_INT_P (pmop
[1])
2523 && (INTVAL (pmop
[1]) & INTVAL (trueop1
)) == 0)
2524 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
2526 for (which
= 0; which
< 2; which
++)
2529 switch (GET_CODE (tem
))
2532 if (CONST_INT_P (XEXP (tem
, 1))
2533 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
2534 == INTVAL (trueop1
))
2535 pmop
[which
] = XEXP (tem
, 0);
2539 if (CONST_INT_P (XEXP (tem
, 1))
2540 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
2541 pmop
[which
] = XEXP (tem
, 0);
2548 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
2550 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
2552 return simplify_gen_binary (code
, mode
, tem
, op1
);
2556 /* (and X (ior (not X) Y) -> (and X Y) */
2557 if (GET_CODE (op1
) == IOR
2558 && GET_CODE (XEXP (op1
, 0)) == NOT
2559 && op0
== XEXP (XEXP (op1
, 0), 0))
2560 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
2562 /* (and (ior (not X) Y) X) -> (and X Y) */
2563 if (GET_CODE (op0
) == IOR
2564 && GET_CODE (XEXP (op0
, 0)) == NOT
2565 && op1
== XEXP (XEXP (op0
, 0), 0))
2566 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
2568 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2574 /* 0/x is 0 (or x&0 if x has side-effects). */
2575 if (trueop0
== CONST0_RTX (mode
))
2577 if (side_effects_p (op1
))
2578 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2582 if (trueop1
== CONST1_RTX (mode
))
2583 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2584 /* Convert divide by power of two into shift. */
2585 if (CONST_INT_P (trueop1
)
2586 && (val
= exact_log2 (INTVAL (trueop1
))) > 0)
2587 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
2591 /* Handle floating point and integers separately. */
2592 if (SCALAR_FLOAT_MODE_P (mode
))
2594 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2595 safe for modes with NaNs, since 0.0 / 0.0 will then be
2596 NaN rather than 0.0. Nor is it safe for modes with signed
2597 zeros, since dividing 0 by a negative number gives -0.0 */
2598 if (trueop0
== CONST0_RTX (mode
)
2599 && !HONOR_NANS (mode
)
2600 && !HONOR_SIGNED_ZEROS (mode
)
2601 && ! side_effects_p (op1
))
2604 if (trueop1
== CONST1_RTX (mode
)
2605 && !HONOR_SNANS (mode
))
2608 if (GET_CODE (trueop1
) == CONST_DOUBLE
2609 && trueop1
!= CONST0_RTX (mode
))
2612 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2615 if (REAL_VALUES_EQUAL (d
, dconstm1
)
2616 && !HONOR_SNANS (mode
))
2617 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2619 /* Change FP division by a constant into multiplication.
2620 Only do this with -freciprocal-math. */
2621 if (flag_reciprocal_math
2622 && !REAL_VALUES_EQUAL (d
, dconst0
))
2624 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
2625 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
2626 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
2632 /* 0/x is 0 (or x&0 if x has side-effects). */
2633 if (trueop0
== CONST0_RTX (mode
))
2635 if (side_effects_p (op1
))
2636 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2640 if (trueop1
== CONST1_RTX (mode
))
2641 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2643 if (trueop1
== constm1_rtx
)
2645 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
2646 return simplify_gen_unary (NEG
, mode
, x
, mode
);
2652 /* 0%x is 0 (or x&0 if x has side-effects). */
2653 if (trueop0
== CONST0_RTX (mode
))
2655 if (side_effects_p (op1
))
2656 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2659 /* x%1 is 0 (of x&0 if x has side-effects). */
2660 if (trueop1
== CONST1_RTX (mode
))
2662 if (side_effects_p (op0
))
2663 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
2664 return CONST0_RTX (mode
);
2666 /* Implement modulus by power of two as AND. */
2667 if (CONST_INT_P (trueop1
)
2668 && exact_log2 (INTVAL (trueop1
)) > 0)
2669 return simplify_gen_binary (AND
, mode
, op0
,
2670 GEN_INT (INTVAL (op1
) - 1));
2674 /* 0%x is 0 (or x&0 if x has side-effects). */
2675 if (trueop0
== CONST0_RTX (mode
))
2677 if (side_effects_p (op1
))
2678 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
2681 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2682 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
2684 if (side_effects_p (op0
))
2685 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
2686 return CONST0_RTX (mode
);
2693 if (trueop1
== CONST0_RTX (mode
))
2695 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2697 /* Rotating ~0 always results in ~0. */
2698 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
2699 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
2700 && ! side_effects_p (op1
))
2703 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
2705 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
2706 if (val
!= INTVAL (op1
))
2707 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
2714 if (trueop1
== CONST0_RTX (mode
))
2716 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2718 goto canonicalize_shift
;
2721 if (trueop1
== CONST0_RTX (mode
))
2723 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
2725 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2726 if (GET_CODE (op0
) == CLZ
2727 && CONST_INT_P (trueop1
)
2728 && STORE_FLAG_VALUE
== 1
2729 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
2731 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2732 unsigned HOST_WIDE_INT zero_val
= 0;
2734 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
2735 && zero_val
== GET_MODE_BITSIZE (imode
)
2736 && INTVAL (trueop1
) == exact_log2 (zero_val
))
2737 return simplify_gen_relational (EQ
, mode
, imode
,
2738 XEXP (op0
, 0), const0_rtx
);
2740 goto canonicalize_shift
;
2743 if (width
<= HOST_BITS_PER_WIDE_INT
2744 && CONST_INT_P (trueop1
)
2745 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
2746 && ! side_effects_p (op0
))
2748 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2750 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2756 if (width
<= HOST_BITS_PER_WIDE_INT
2757 && CONST_INT_P (trueop1
)
2758 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
2759 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
2760 && ! side_effects_p (op0
))
2762 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2764 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2770 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2772 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2774 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2780 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
2782 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2784 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2797 /* ??? There are simplifications that can be done. */
2801 if (!VECTOR_MODE_P (mode
))
2803 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2804 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
2805 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2806 gcc_assert (XVECLEN (trueop1
, 0) == 1);
2807 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
2809 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2810 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
2813 /* Extract a scalar element from a nested VEC_SELECT expression
2814 (with optional nested VEC_CONCAT expression). Some targets
2815 (i386) extract scalar element from a vector using chain of
2816 nested VEC_SELECT expressions. When input operand is a memory
2817 operand, this operation can be simplified to a simple scalar
2818 load from an offseted memory address. */
2819 if (GET_CODE (trueop0
) == VEC_SELECT
)
2821 rtx op0
= XEXP (trueop0
, 0);
2822 rtx op1
= XEXP (trueop0
, 1);
2824 enum machine_mode opmode
= GET_MODE (op0
);
2825 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
2826 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
2828 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
2834 gcc_assert (GET_CODE (op1
) == PARALLEL
);
2835 gcc_assert (i
< n_elts
);
2837 /* Select element, pointed by nested selector. */
2838 elem
= INTVAL (XVECEXP (op1
, 0, i
));
2840 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2841 if (GET_CODE (op0
) == VEC_CONCAT
)
2843 rtx op00
= XEXP (op0
, 0);
2844 rtx op01
= XEXP (op0
, 1);
2846 enum machine_mode mode00
, mode01
;
2847 int n_elts00
, n_elts01
;
2849 mode00
= GET_MODE (op00
);
2850 mode01
= GET_MODE (op01
);
2852 /* Find out number of elements of each operand. */
2853 if (VECTOR_MODE_P (mode00
))
2855 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
2856 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
2861 if (VECTOR_MODE_P (mode01
))
2863 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
2864 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
2869 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
2871 /* Select correct operand of VEC_CONCAT
2872 and adjust selector. */
2873 if (elem
< n_elts01
)
2884 vec
= rtvec_alloc (1);
2885 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
2887 tmp
= gen_rtx_fmt_ee (code
, mode
,
2888 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
2894 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
2895 gcc_assert (GET_MODE_INNER (mode
)
2896 == GET_MODE_INNER (GET_MODE (trueop0
)));
2897 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
2899 if (GET_CODE (trueop0
) == CONST_VECTOR
)
2901 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2902 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2903 rtvec v
= rtvec_alloc (n_elts
);
2906 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
2907 for (i
= 0; i
< n_elts
; i
++)
2909 rtx x
= XVECEXP (trueop1
, 0, i
);
2911 gcc_assert (CONST_INT_P (x
));
2912 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
2916 return gen_rtx_CONST_VECTOR (mode
, v
);
2920 if (XVECLEN (trueop1
, 0) == 1
2921 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
2922 && GET_CODE (trueop0
) == VEC_CONCAT
)
2925 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
2927 /* Try to find the element in the VEC_CONCAT. */
2928 while (GET_MODE (vec
) != mode
2929 && GET_CODE (vec
) == VEC_CONCAT
)
2931 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
2932 if (offset
< vec_size
)
2933 vec
= XEXP (vec
, 0);
2937 vec
= XEXP (vec
, 1);
2939 vec
= avoid_constant_pool_reference (vec
);
2942 if (GET_MODE (vec
) == mode
)
2949 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2950 ? GET_MODE (trueop0
)
2951 : GET_MODE_INNER (mode
));
2952 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2953 ? GET_MODE (trueop1
)
2954 : GET_MODE_INNER (mode
));
2956 gcc_assert (VECTOR_MODE_P (mode
));
2957 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2958 == GET_MODE_SIZE (mode
));
2960 if (VECTOR_MODE_P (op0_mode
))
2961 gcc_assert (GET_MODE_INNER (mode
)
2962 == GET_MODE_INNER (op0_mode
));
2964 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2966 if (VECTOR_MODE_P (op1_mode
))
2967 gcc_assert (GET_MODE_INNER (mode
)
2968 == GET_MODE_INNER (op1_mode
));
2970 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2972 if ((GET_CODE (trueop0
) == CONST_VECTOR
2973 || CONST_INT_P (trueop0
)
2974 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2975 && (GET_CODE (trueop1
) == CONST_VECTOR
2976 || CONST_INT_P (trueop1
)
2977 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2979 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2980 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2981 rtvec v
= rtvec_alloc (n_elts
);
2983 unsigned in_n_elts
= 1;
2985 if (VECTOR_MODE_P (op0_mode
))
2986 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2987 for (i
= 0; i
< n_elts
; i
++)
2991 if (!VECTOR_MODE_P (op0_mode
))
2992 RTVEC_ELT (v
, i
) = trueop0
;
2994 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2998 if (!VECTOR_MODE_P (op1_mode
))
2999 RTVEC_ELT (v
, i
) = trueop1
;
3001 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3006 return gen_rtx_CONST_VECTOR (mode
, v
);
3019 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3022 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3024 unsigned int width
= GET_MODE_BITSIZE (mode
);
3026 if (VECTOR_MODE_P (mode
)
3027 && code
!= VEC_CONCAT
3028 && GET_CODE (op0
) == CONST_VECTOR
3029 && GET_CODE (op1
) == CONST_VECTOR
)
3031 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3032 enum machine_mode op0mode
= GET_MODE (op0
);
3033 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3034 enum machine_mode op1mode
= GET_MODE (op1
);
3035 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3036 rtvec v
= rtvec_alloc (n_elts
);
3039 gcc_assert (op0_n_elts
== n_elts
);
3040 gcc_assert (op1_n_elts
== n_elts
);
3041 for (i
= 0; i
< n_elts
; i
++)
3043 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3044 CONST_VECTOR_ELT (op0
, i
),
3045 CONST_VECTOR_ELT (op1
, i
));
3048 RTVEC_ELT (v
, i
) = x
;
3051 return gen_rtx_CONST_VECTOR (mode
, v
);
3054 if (VECTOR_MODE_P (mode
)
3055 && code
== VEC_CONCAT
3056 && (CONST_INT_P (op0
)
3057 || GET_CODE (op0
) == CONST_DOUBLE
3058 || GET_CODE (op0
) == CONST_FIXED
)
3059 && (CONST_INT_P (op1
)
3060 || GET_CODE (op1
) == CONST_DOUBLE
3061 || GET_CODE (op1
) == CONST_FIXED
))
3063 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3064 rtvec v
= rtvec_alloc (n_elts
);
3066 gcc_assert (n_elts
>= 2);
3069 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3070 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3072 RTVEC_ELT (v
, 0) = op0
;
3073 RTVEC_ELT (v
, 1) = op1
;
3077 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3078 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3081 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3082 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3083 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3085 for (i
= 0; i
< op0_n_elts
; ++i
)
3086 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3087 for (i
= 0; i
< op1_n_elts
; ++i
)
3088 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3091 return gen_rtx_CONST_VECTOR (mode
, v
);
3094 if (SCALAR_FLOAT_MODE_P (mode
)
3095 && GET_CODE (op0
) == CONST_DOUBLE
3096 && GET_CODE (op1
) == CONST_DOUBLE
3097 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3108 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3110 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3112 for (i
= 0; i
< 4; i
++)
3129 real_from_target (&r
, tmp0
, mode
);
3130 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3134 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3137 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3138 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3139 real_convert (&f0
, mode
, &f0
);
3140 real_convert (&f1
, mode
, &f1
);
3142 if (HONOR_SNANS (mode
)
3143 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3147 && REAL_VALUES_EQUAL (f1
, dconst0
)
3148 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3151 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3152 && flag_trapping_math
3153 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3155 int s0
= REAL_VALUE_NEGATIVE (f0
);
3156 int s1
= REAL_VALUE_NEGATIVE (f1
);
3161 /* Inf + -Inf = NaN plus exception. */
3166 /* Inf - Inf = NaN plus exception. */
3171 /* Inf / Inf = NaN plus exception. */
3178 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3179 && flag_trapping_math
3180 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3181 || (REAL_VALUE_ISINF (f1
)
3182 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3183 /* Inf * 0 = NaN plus exception. */
3186 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3188 real_convert (&result
, mode
, &value
);
3190 /* Don't constant fold this floating point operation if
3191 the result has overflowed and flag_trapping_math. */
3193 if (flag_trapping_math
3194 && MODE_HAS_INFINITIES (mode
)
3195 && REAL_VALUE_ISINF (result
)
3196 && !REAL_VALUE_ISINF (f0
)
3197 && !REAL_VALUE_ISINF (f1
))
3198 /* Overflow plus exception. */
3201 /* Don't constant fold this floating point operation if the
3202 result may dependent upon the run-time rounding mode and
3203 flag_rounding_math is set, or if GCC's software emulation
3204 is unable to accurately represent the result. */
3206 if ((flag_rounding_math
3207 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3208 && (inexact
|| !real_identical (&result
, &value
)))
3211 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3215 /* We can fold some multi-word operations. */
3216 if (GET_MODE_CLASS (mode
) == MODE_INT
3217 && width
== HOST_BITS_PER_WIDE_INT
* 2
3218 && (GET_CODE (op0
) == CONST_DOUBLE
|| CONST_INT_P (op0
))
3219 && (GET_CODE (op1
) == CONST_DOUBLE
|| CONST_INT_P (op1
)))
3221 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
3222 HOST_WIDE_INT h1
, h2
, hv
, ht
;
3224 if (GET_CODE (op0
) == CONST_DOUBLE
)
3225 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
3227 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
3229 if (GET_CODE (op1
) == CONST_DOUBLE
)
3230 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
3232 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
3237 /* A - B == A + (-B). */
3238 neg_double (l2
, h2
, &lv
, &hv
);
3241 /* Fall through.... */
3244 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3248 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
3252 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3253 &lv
, &hv
, <
, &ht
))
3258 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
3259 <
, &ht
, &lv
, &hv
))
3264 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3265 &lv
, &hv
, <
, &ht
))
3270 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
3271 <
, &ht
, &lv
, &hv
))
3276 lv
= l1
& l2
, hv
= h1
& h2
;
3280 lv
= l1
| l2
, hv
= h1
| h2
;
3284 lv
= l1
^ l2
, hv
= h1
^ h2
;
3290 && ((unsigned HOST_WIDE_INT
) l1
3291 < (unsigned HOST_WIDE_INT
) l2
)))
3300 && ((unsigned HOST_WIDE_INT
) l1
3301 > (unsigned HOST_WIDE_INT
) l2
)))
3308 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
3310 && ((unsigned HOST_WIDE_INT
) l1
3311 < (unsigned HOST_WIDE_INT
) l2
)))
3318 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
3320 && ((unsigned HOST_WIDE_INT
) l1
3321 > (unsigned HOST_WIDE_INT
) l2
)))
3327 case LSHIFTRT
: case ASHIFTRT
:
3329 case ROTATE
: case ROTATERT
:
3330 if (SHIFT_COUNT_TRUNCATED
)
3331 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
3333 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
3336 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3337 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
3339 else if (code
== ASHIFT
)
3340 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
3341 else if (code
== ROTATE
)
3342 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3343 else /* code == ROTATERT */
3344 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
3351 return immed_double_const (lv
, hv
, mode
);
3354 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
3355 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3357 /* Get the integer argument values in two forms:
3358 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3360 arg0
= INTVAL (op0
);
3361 arg1
= INTVAL (op1
);
3363 if (width
< HOST_BITS_PER_WIDE_INT
)
3365 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3366 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3369 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3370 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3373 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3374 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3382 /* Compute the value of the arithmetic. */
3387 val
= arg0s
+ arg1s
;
3391 val
= arg0s
- arg1s
;
3395 val
= arg0s
* arg1s
;
3400 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3403 val
= arg0s
/ arg1s
;
3408 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3411 val
= arg0s
% arg1s
;
3416 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3419 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
3424 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3427 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
3445 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3446 the value is in range. We can't return any old value for
3447 out-of-range arguments because either the middle-end (via
3448 shift_truncation_mask) or the back-end might be relying on
3449 target-specific knowledge. Nor can we rely on
3450 shift_truncation_mask, since the shift might not be part of an
3451 ashlM3, lshrM3 or ashrM3 instruction. */
3452 if (SHIFT_COUNT_TRUNCATED
)
3453 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
3454 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
3457 val
= (code
== ASHIFT
3458 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
3459 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
3461 /* Sign-extend the result for arithmetic right shifts. */
3462 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
3463 val
|= ((HOST_WIDE_INT
) -1) << (width
- arg1
);
3471 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
3472 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
3480 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
3481 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
3485 /* Do nothing here. */
3489 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
3493 val
= ((unsigned HOST_WIDE_INT
) arg0
3494 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3498 val
= arg0s
> arg1s
? arg0s
: arg1s
;
3502 val
= ((unsigned HOST_WIDE_INT
) arg0
3503 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
3516 /* ??? There are simplifications that can be done. */
3523 return gen_int_mode (val
, mode
);
3531 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3534 Rather than test for specific case, we do this by a brute-force method
3535 and do all possible simplifications until no more changes occur. Then
3536 we rebuild the operation. */
3538 struct simplify_plus_minus_op_data
3545 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
3549 result
= (commutative_operand_precedence (y
)
3550 - commutative_operand_precedence (x
));
3554 /* Group together equal REGs to do more simplification. */
3555 if (REG_P (x
) && REG_P (y
))
3556 return REGNO (x
) > REGNO (y
);
3562 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
3565 struct simplify_plus_minus_op_data ops
[8];
3567 int n_ops
= 2, input_ops
= 2;
3568 int changed
, n_constants
= 0, canonicalized
= 0;
3571 memset (ops
, 0, sizeof ops
);
3573 /* Set up the two operands and then expand them until nothing has been
3574 changed. If we run out of room in our array, give up; this should
3575 almost never happen. */
3580 ops
[1].neg
= (code
== MINUS
);
3586 for (i
= 0; i
< n_ops
; i
++)
3588 rtx this_op
= ops
[i
].op
;
3589 int this_neg
= ops
[i
].neg
;
3590 enum rtx_code this_code
= GET_CODE (this_op
);
3599 ops
[n_ops
].op
= XEXP (this_op
, 1);
3600 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3603 ops
[i
].op
= XEXP (this_op
, 0);
3606 canonicalized
|= this_neg
;
3610 ops
[i
].op
= XEXP (this_op
, 0);
3611 ops
[i
].neg
= ! this_neg
;
3618 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3619 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3620 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3622 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3623 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3624 ops
[n_ops
].neg
= this_neg
;
3632 /* ~a -> (-a - 1) */
3635 ops
[n_ops
].op
= constm1_rtx
;
3636 ops
[n_ops
++].neg
= this_neg
;
3637 ops
[i
].op
= XEXP (this_op
, 0);
3638 ops
[i
].neg
= !this_neg
;
3648 ops
[i
].op
= neg_const_int (mode
, this_op
);
3662 if (n_constants
> 1)
3665 gcc_assert (n_ops
>= 2);
3667 /* If we only have two operands, we can avoid the loops. */
3670 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
3673 /* Get the two operands. Be careful with the order, especially for
3674 the cases where code == MINUS. */
3675 if (ops
[0].neg
&& ops
[1].neg
)
3677 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
3680 else if (ops
[0].neg
)
3691 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
3694 /* Now simplify each pair of operands until nothing changes. */
3697 /* Insertion sort is good enough for an eight-element array. */
3698 for (i
= 1; i
< n_ops
; i
++)
3700 struct simplify_plus_minus_op_data save
;
3702 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
3708 ops
[j
+ 1] = ops
[j
];
3709 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
3714 for (i
= n_ops
- 1; i
> 0; i
--)
3715 for (j
= i
- 1; j
>= 0; j
--)
3717 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
3718 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
3720 if (lhs
!= 0 && rhs
!= 0)
3722 enum rtx_code ncode
= PLUS
;
3728 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3730 else if (swap_commutative_operands_p (lhs
, rhs
))
3731 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
3733 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
3734 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
3736 rtx tem_lhs
, tem_rhs
;
3738 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
3739 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
3740 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
3742 if (tem
&& !CONSTANT_P (tem
))
3743 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
3746 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
3748 /* Reject "simplifications" that just wrap the two
3749 arguments in a CONST. Failure to do so can result
3750 in infinite recursion with simplify_binary_operation
3751 when it calls us to simplify CONST operations. */
3753 && ! (GET_CODE (tem
) == CONST
3754 && GET_CODE (XEXP (tem
, 0)) == ncode
3755 && XEXP (XEXP (tem
, 0), 0) == lhs
3756 && XEXP (XEXP (tem
, 0), 1) == rhs
))
3759 if (GET_CODE (tem
) == NEG
)
3760 tem
= XEXP (tem
, 0), lneg
= !lneg
;
3761 if (CONST_INT_P (tem
) && lneg
)
3762 tem
= neg_const_int (mode
, tem
), lneg
= 0;
3766 ops
[j
].op
= NULL_RTX
;
3773 /* If nothing changed, fail. */
3777 /* Pack all the operands to the lower-numbered entries. */
3778 for (i
= 0, j
= 0; j
< n_ops
; j
++)
3788 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3790 && CONST_INT_P (ops
[1].op
)
3791 && CONSTANT_P (ops
[0].op
)
3793 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
3795 /* We suppressed creation of trivial CONST expressions in the
3796 combination loop to avoid recursion. Create one manually now.
3797 The combination loop should have ensured that there is exactly
3798 one CONST_INT, and the sort will have ensured that it is last
3799 in the array and that any other constant will be next-to-last. */
3802 && CONST_INT_P (ops
[n_ops
- 1].op
)
3803 && CONSTANT_P (ops
[n_ops
- 2].op
))
3805 rtx value
= ops
[n_ops
- 1].op
;
3806 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
3807 value
= neg_const_int (mode
, value
);
3808 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
3812 /* Put a non-negated operand first, if possible. */
3814 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
3817 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
3826 /* Now make the result by performing the requested operations. */
3828 for (i
= 1; i
< n_ops
; i
++)
3829 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
3830 mode
, result
, ops
[i
].op
);
3835 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3837 plus_minus_operand_p (const_rtx x
)
3839 return GET_CODE (x
) == PLUS
3840 || GET_CODE (x
) == MINUS
3841 || (GET_CODE (x
) == CONST
3842 && GET_CODE (XEXP (x
, 0)) == PLUS
3843 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
3844 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
3847 /* Like simplify_binary_operation except used for relational operators.
3848 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3849 not also be VOIDmode.
3851 CMP_MODE specifies in which mode the comparison is done in, so it is
3852 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3853 the operands or, if both are VOIDmode, the operands are compared in
3854 "infinite precision". */
3856 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
3857 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3859 rtx tem
, trueop0
, trueop1
;
3861 if (cmp_mode
== VOIDmode
)
3862 cmp_mode
= GET_MODE (op0
);
3863 if (cmp_mode
== VOIDmode
)
3864 cmp_mode
= GET_MODE (op1
);
3866 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
3869 if (SCALAR_FLOAT_MODE_P (mode
))
3871 if (tem
== const0_rtx
)
3872 return CONST0_RTX (mode
);
3873 #ifdef FLOAT_STORE_FLAG_VALUE
3875 REAL_VALUE_TYPE val
;
3876 val
= FLOAT_STORE_FLAG_VALUE (mode
);
3877 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
3883 if (VECTOR_MODE_P (mode
))
3885 if (tem
== const0_rtx
)
3886 return CONST0_RTX (mode
);
3887 #ifdef VECTOR_STORE_FLAG_VALUE
3892 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
3893 if (val
== NULL_RTX
)
3895 if (val
== const1_rtx
)
3896 return CONST1_RTX (mode
);
3898 units
= GET_MODE_NUNITS (mode
);
3899 v
= rtvec_alloc (units
);
3900 for (i
= 0; i
< units
; i
++)
3901 RTVEC_ELT (v
, i
) = val
;
3902 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
3912 /* For the following tests, ensure const0_rtx is op1. */
3913 if (swap_commutative_operands_p (op0
, op1
)
3914 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
3915 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
3917 /* If op0 is a compare, extract the comparison arguments from it. */
3918 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3919 return simplify_gen_relational (code
, mode
, VOIDmode
,
3920 XEXP (op0
, 0), XEXP (op0
, 1));
3922 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
3926 trueop0
= avoid_constant_pool_reference (op0
);
3927 trueop1
= avoid_constant_pool_reference (op1
);
3928 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
3932 /* This part of simplify_relational_operation is only used when CMP_MODE
3933 is not in class MODE_CC (i.e. it is a real comparison).
3935 MODE is the mode of the result, while CMP_MODE specifies in which
3936 mode the comparison is done in, so it is the mode of the operands. */
3939 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
3940 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
3942 enum rtx_code op0code
= GET_CODE (op0
);
3944 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
3946 /* If op0 is a comparison, extract the comparison arguments
3950 if (GET_MODE (op0
) == mode
)
3951 return simplify_rtx (op0
);
3953 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
3954 XEXP (op0
, 0), XEXP (op0
, 1));
3956 else if (code
== EQ
)
3958 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
3959 if (new_code
!= UNKNOWN
)
3960 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
3961 XEXP (op0
, 0), XEXP (op0
, 1));
3965 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
3966 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
3967 if ((code
== LTU
|| code
== GEU
)
3968 && GET_CODE (op0
) == PLUS
3969 && CONST_INT_P (XEXP (op0
, 1))
3970 && (rtx_equal_p (op1
, XEXP (op0
, 0))
3971 || rtx_equal_p (op1
, XEXP (op0
, 1))))
3974 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
3975 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
3976 cmp_mode
, XEXP (op0
, 0), new_cmp
);
3979 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
3980 if ((code
== LTU
|| code
== GEU
)
3981 && GET_CODE (op0
) == PLUS
3982 && rtx_equal_p (op1
, XEXP (op0
, 1))
3983 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
3984 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
3985 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
, XEXP (op0
, 0));
3987 if (op1
== const0_rtx
)
3989 /* Canonicalize (GTU x 0) as (NE x 0). */
3991 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
3992 /* Canonicalize (LEU x 0) as (EQ x 0). */
3994 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
3996 else if (op1
== const1_rtx
)
4001 /* Canonicalize (GE x 1) as (GT x 0). */
4002 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4005 /* Canonicalize (GEU x 1) as (NE x 0). */
4006 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4009 /* Canonicalize (LT x 1) as (LE x 0). */
4010 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4013 /* Canonicalize (LTU x 1) as (EQ x 0). */
4014 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4020 else if (op1
== constm1_rtx
)
4022 /* Canonicalize (LE x -1) as (LT x 0). */
4024 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4025 /* Canonicalize (GT x -1) as (GE x 0). */
4027 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4030 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4031 if ((code
== EQ
|| code
== NE
)
4032 && (op0code
== PLUS
|| op0code
== MINUS
)
4034 && CONSTANT_P (XEXP (op0
, 1))
4035 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4037 rtx x
= XEXP (op0
, 0);
4038 rtx c
= XEXP (op0
, 1);
4040 c
= simplify_gen_binary (op0code
== PLUS
? MINUS
: PLUS
,
4042 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, c
);
4045 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4046 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4048 && op1
== const0_rtx
4049 && GET_MODE_CLASS (mode
) == MODE_INT
4050 && cmp_mode
!= VOIDmode
4051 /* ??? Work-around BImode bugs in the ia64 backend. */
4053 && cmp_mode
!= BImode
4054 && nonzero_bits (op0
, cmp_mode
) == 1
4055 && STORE_FLAG_VALUE
== 1)
4056 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4057 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4058 : lowpart_subreg (mode
, op0
, cmp_mode
);
4060 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4061 if ((code
== EQ
|| code
== NE
)
4062 && op1
== const0_rtx
4064 return simplify_gen_relational (code
, mode
, cmp_mode
,
4065 XEXP (op0
, 0), XEXP (op0
, 1));
4067 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4068 if ((code
== EQ
|| code
== NE
)
4070 && rtx_equal_p (XEXP (op0
, 0), op1
)
4071 && !side_effects_p (XEXP (op0
, 0)))
4072 return simplify_gen_relational (code
, mode
, cmp_mode
,
4073 XEXP (op0
, 1), const0_rtx
);
4075 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4076 if ((code
== EQ
|| code
== NE
)
4078 && rtx_equal_p (XEXP (op0
, 1), op1
)
4079 && !side_effects_p (XEXP (op0
, 1)))
4080 return simplify_gen_relational (code
, mode
, cmp_mode
,
4081 XEXP (op0
, 0), const0_rtx
);
4083 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4084 if ((code
== EQ
|| code
== NE
)
4086 && (CONST_INT_P (op1
)
4087 || GET_CODE (op1
) == CONST_DOUBLE
)
4088 && (CONST_INT_P (XEXP (op0
, 1))
4089 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
))
4090 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4091 simplify_gen_binary (XOR
, cmp_mode
,
4092 XEXP (op0
, 1), op1
));
4094 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
4100 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4101 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
4102 XEXP (op0
, 0), const0_rtx
);
4107 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4108 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4109 XEXP (op0
, 0), const0_rtx
);
4128 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4129 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4130 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4131 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4132 For floating-point comparisons, assume that the operands were ordered. */
4135 comparison_result (enum rtx_code code
, int known_results
)
4141 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4144 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4148 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4151 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4155 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4158 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4161 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4163 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4166 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4168 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4171 return const_true_rtx
;
4179 /* Check if the given comparison (done in the given MODE) is actually a
4180 tautology or a contradiction.
4181 If no simplification is possible, this function returns zero.
4182 Otherwise, it returns either const_true_rtx or const0_rtx. */
4185 simplify_const_relational_operation (enum rtx_code code
,
4186 enum machine_mode mode
,
4193 gcc_assert (mode
!= VOIDmode
4194 || (GET_MODE (op0
) == VOIDmode
4195 && GET_MODE (op1
) == VOIDmode
));
4197 /* If op0 is a compare, extract the comparison arguments from it. */
4198 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4200 op1
= XEXP (op0
, 1);
4201 op0
= XEXP (op0
, 0);
4203 if (GET_MODE (op0
) != VOIDmode
)
4204 mode
= GET_MODE (op0
);
4205 else if (GET_MODE (op1
) != VOIDmode
)
4206 mode
= GET_MODE (op1
);
4211 /* We can't simplify MODE_CC values since we don't know what the
4212 actual comparison is. */
4213 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4216 /* Make sure the constant is second. */
4217 if (swap_commutative_operands_p (op0
, op1
))
4219 tem
= op0
, op0
= op1
, op1
= tem
;
4220 code
= swap_condition (code
);
4223 trueop0
= avoid_constant_pool_reference (op0
);
4224 trueop1
= avoid_constant_pool_reference (op1
);
4226 /* For integer comparisons of A and B maybe we can simplify A - B and can
4227 then simplify a comparison of that with zero. If A and B are both either
4228 a register or a CONST_INT, this can't help; testing for these cases will
4229 prevent infinite recursion here and speed things up.
4231 We can only do this for EQ and NE comparisons as otherwise we may
4232 lose or introduce overflow which we cannot disregard as undefined as
4233 we do not know the signedness of the operation on either the left or
4234 the right hand side of the comparison. */
4236 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4237 && (code
== EQ
|| code
== NE
)
4238 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
4239 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
4240 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4241 /* We cannot do this if tem is a nonzero address. */
4242 && ! nonzero_address_p (tem
))
4243 return simplify_const_relational_operation (signed_condition (code
),
4244 mode
, tem
, const0_rtx
);
4246 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4247 return const_true_rtx
;
4249 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4252 /* For modes without NaNs, if the two operands are equal, we know the
4253 result except if they have side-effects. Even with NaNs we know
4254 the result of unordered comparisons and, if signaling NaNs are
4255 irrelevant, also the result of LT/GT/LTGT. */
4256 if ((! HONOR_NANS (GET_MODE (trueop0
))
4257 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4258 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4259 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4260 && rtx_equal_p (trueop0
, trueop1
)
4261 && ! side_effects_p (trueop0
))
4262 return comparison_result (code
, CMP_EQ
);
4264 /* If the operands are floating-point constants, see if we can fold
4266 if (GET_CODE (trueop0
) == CONST_DOUBLE
4267 && GET_CODE (trueop1
) == CONST_DOUBLE
4268 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4270 REAL_VALUE_TYPE d0
, d1
;
4272 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4273 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4275 /* Comparisons are unordered iff at least one of the values is NaN. */
4276 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4286 return const_true_rtx
;
4299 return comparison_result (code
,
4300 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4301 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4304 /* Otherwise, see if the operands are both integers. */
4305 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4306 && (GET_CODE (trueop0
) == CONST_DOUBLE
4307 || CONST_INT_P (trueop0
))
4308 && (GET_CODE (trueop1
) == CONST_DOUBLE
4309 || CONST_INT_P (trueop1
)))
4311 int width
= GET_MODE_BITSIZE (mode
);
4312 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
4313 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4315 /* Get the two words comprising each integer constant. */
4316 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
4318 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4319 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4323 l0u
= l0s
= INTVAL (trueop0
);
4324 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4327 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
4329 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4330 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4334 l1u
= l1s
= INTVAL (trueop1
);
4335 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4338 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4339 we have to sign or zero-extend the values. */
4340 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4342 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4343 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4345 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4346 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
4348 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
4349 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
4351 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4352 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4354 if (h0u
== h1u
&& l0u
== l1u
)
4355 return comparison_result (code
, CMP_EQ
);
4359 cr
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
)) ? CMP_LT
: CMP_GT
;
4360 cr
|= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
)) ? CMP_LTU
: CMP_GTU
;
4361 return comparison_result (code
, cr
);
4365 /* Optimize comparisons with upper and lower bounds. */
4366 if (SCALAR_INT_MODE_P (mode
)
4367 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
4368 && CONST_INT_P (trueop1
))
4371 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4372 HOST_WIDE_INT val
= INTVAL (trueop1
);
4373 HOST_WIDE_INT mmin
, mmax
;
4383 /* Get a reduced range if the sign bit is zero. */
4384 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4391 rtx mmin_rtx
, mmax_rtx
;
4392 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4394 mmin
= INTVAL (mmin_rtx
);
4395 mmax
= INTVAL (mmax_rtx
);
4398 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4400 mmin
>>= (sign_copies
- 1);
4401 mmax
>>= (sign_copies
- 1);
4407 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4409 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4410 return const_true_rtx
;
4411 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4416 return const_true_rtx
;
4421 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4423 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4424 return const_true_rtx
;
4425 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4430 return const_true_rtx
;
4436 /* x == y is always false for y out of range. */
4437 if (val
< mmin
|| val
> mmax
)
4441 /* x > y is always false for y >= mmax, always true for y < mmin. */
4443 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
4445 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
4446 return const_true_rtx
;
4452 return const_true_rtx
;
4455 /* x < y is always false for y <= mmin, always true for y > mmax. */
4457 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4459 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
4460 return const_true_rtx
;
4466 return const_true_rtx
;
4470 /* x != y is always true for y out of range. */
4471 if (val
< mmin
|| val
> mmax
)
4472 return const_true_rtx
;
4480 /* Optimize integer comparisons with zero. */
4481 if (trueop1
== const0_rtx
)
4483 /* Some addresses are known to be nonzero. We don't know
4484 their sign, but equality comparisons are known. */
4485 if (nonzero_address_p (trueop0
))
4487 if (code
== EQ
|| code
== LEU
)
4489 if (code
== NE
|| code
== GTU
)
4490 return const_true_rtx
;
4493 /* See if the first operand is an IOR with a constant. If so, we
4494 may be able to determine the result of this comparison. */
4495 if (GET_CODE (op0
) == IOR
)
4497 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
4498 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
4500 int sign_bitnum
= GET_MODE_BITSIZE (mode
) - 1;
4501 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
4502 && (INTVAL (inner_const
)
4503 & ((HOST_WIDE_INT
) 1 << sign_bitnum
)));
4512 return const_true_rtx
;
4516 return const_true_rtx
;
4530 /* Optimize comparison of ABS with zero. */
4531 if (trueop1
== CONST0_RTX (mode
)
4532 && (GET_CODE (trueop0
) == ABS
4533 || (GET_CODE (trueop0
) == FLOAT_EXTEND
4534 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
4539 /* Optimize abs(x) < 0.0. */
4540 if (!HONOR_SNANS (mode
)
4541 && (!INTEGRAL_MODE_P (mode
)
4542 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4544 if (INTEGRAL_MODE_P (mode
)
4545 && (issue_strict_overflow_warning
4546 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4547 warning (OPT_Wstrict_overflow
,
4548 ("assuming signed overflow does not occur when "
4549 "assuming abs (x) < 0 is false"));
4555 /* Optimize abs(x) >= 0.0. */
4556 if (!HONOR_NANS (mode
)
4557 && (!INTEGRAL_MODE_P (mode
)
4558 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
4560 if (INTEGRAL_MODE_P (mode
)
4561 && (issue_strict_overflow_warning
4562 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
4563 warning (OPT_Wstrict_overflow
,
4564 ("assuming signed overflow does not occur when "
4565 "assuming abs (x) >= 0 is true"));
4566 return const_true_rtx
;
4571 /* Optimize ! (abs(x) < 0.0). */
4572 return const_true_rtx
;
4582 /* Simplify CODE, an operation with result mode MODE and three operands,
4583 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4584 a constant. Return 0 if no simplifications is possible. */
4587 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
4588 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
4591 unsigned int width
= GET_MODE_BITSIZE (mode
);
4593 /* VOIDmode means "infinite" precision. */
4595 width
= HOST_BITS_PER_WIDE_INT
;
4601 if (CONST_INT_P (op0
)
4602 && CONST_INT_P (op1
)
4603 && CONST_INT_P (op2
)
4604 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
4605 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
4607 /* Extracting a bit-field from a constant */
4608 HOST_WIDE_INT val
= INTVAL (op0
);
4610 if (BITS_BIG_ENDIAN
)
4611 val
>>= (GET_MODE_BITSIZE (op0_mode
)
4612 - INTVAL (op2
) - INTVAL (op1
));
4614 val
>>= INTVAL (op2
);
4616 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
4618 /* First zero-extend. */
4619 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
4620 /* If desired, propagate sign bit. */
4621 if (code
== SIGN_EXTRACT
4622 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
4623 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
4626 /* Clear the bits that don't belong in our mode,
4627 unless they and our sign bit are all one.
4628 So we get either a reasonable negative value or a reasonable
4629 unsigned value for this mode. */
4630 if (width
< HOST_BITS_PER_WIDE_INT
4631 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
4632 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
4633 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
4635 return gen_int_mode (val
, mode
);
4640 if (CONST_INT_P (op0
))
4641 return op0
!= const0_rtx
? op1
: op2
;
4643 /* Convert c ? a : a into "a". */
4644 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
4647 /* Convert a != b ? a : b into "a". */
4648 if (GET_CODE (op0
) == NE
4649 && ! side_effects_p (op0
)
4650 && ! HONOR_NANS (mode
)
4651 && ! HONOR_SIGNED_ZEROS (mode
)
4652 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4653 && rtx_equal_p (XEXP (op0
, 1), op2
))
4654 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4655 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4658 /* Convert a == b ? a : b into "b". */
4659 if (GET_CODE (op0
) == EQ
4660 && ! side_effects_p (op0
)
4661 && ! HONOR_NANS (mode
)
4662 && ! HONOR_SIGNED_ZEROS (mode
)
4663 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
4664 && rtx_equal_p (XEXP (op0
, 1), op2
))
4665 || (rtx_equal_p (XEXP (op0
, 0), op2
)
4666 && rtx_equal_p (XEXP (op0
, 1), op1
))))
4669 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
4671 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
4672 ? GET_MODE (XEXP (op0
, 1))
4673 : GET_MODE (XEXP (op0
, 0)));
4676 /* Look for happy constants in op1 and op2. */
4677 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
4679 HOST_WIDE_INT t
= INTVAL (op1
);
4680 HOST_WIDE_INT f
= INTVAL (op2
);
4682 if (t
== STORE_FLAG_VALUE
&& f
== 0)
4683 code
= GET_CODE (op0
);
4684 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
4687 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
4695 return simplify_gen_relational (code
, mode
, cmp_mode
,
4696 XEXP (op0
, 0), XEXP (op0
, 1));
4699 if (cmp_mode
== VOIDmode
)
4700 cmp_mode
= op0_mode
;
4701 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
4702 cmp_mode
, XEXP (op0
, 0),
4705 /* See if any simplifications were possible. */
4708 if (CONST_INT_P (temp
))
4709 return temp
== const0_rtx
? op2
: op1
;
4711 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
4717 gcc_assert (GET_MODE (op0
) == mode
);
4718 gcc_assert (GET_MODE (op1
) == mode
);
4719 gcc_assert (VECTOR_MODE_P (mode
));
4720 op2
= avoid_constant_pool_reference (op2
);
4721 if (CONST_INT_P (op2
))
4723 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
4724 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
4725 int mask
= (1 << n_elts
) - 1;
4727 if (!(INTVAL (op2
) & mask
))
4729 if ((INTVAL (op2
) & mask
) == mask
)
4732 op0
= avoid_constant_pool_reference (op0
);
4733 op1
= avoid_constant_pool_reference (op1
);
4734 if (GET_CODE (op0
) == CONST_VECTOR
4735 && GET_CODE (op1
) == CONST_VECTOR
)
4737 rtvec v
= rtvec_alloc (n_elts
);
4740 for (i
= 0; i
< n_elts
; i
++)
4741 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
4742 ? CONST_VECTOR_ELT (op0
, i
)
4743 : CONST_VECTOR_ELT (op1
, i
));
4744 return gen_rtx_CONST_VECTOR (mode
, v
);
4756 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
4758 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
4760 Works by unpacking OP into a collection of 8-bit values
4761 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4762 and then repacking them again for OUTERMODE. */
4765 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
4766 enum machine_mode innermode
, unsigned int byte
)
4768 /* We support up to 512-bit values (for V8DFmode). */
4772 value_mask
= (1 << value_bit
) - 1
4774 unsigned char value
[max_bitsize
/ value_bit
];
4783 rtvec result_v
= NULL
;
4784 enum mode_class outer_class
;
4785 enum machine_mode outer_submode
;
4787 /* Some ports misuse CCmode. */
4788 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
4791 /* We have no way to represent a complex constant at the rtl level. */
4792 if (COMPLEX_MODE_P (outermode
))
4795 /* Unpack the value. */
4797 if (GET_CODE (op
) == CONST_VECTOR
)
4799 num_elem
= CONST_VECTOR_NUNITS (op
);
4800 elems
= &CONST_VECTOR_ELT (op
, 0);
4801 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
4807 elem_bitsize
= max_bitsize
;
4809 /* If this asserts, it is too complicated; reducing value_bit may help. */
4810 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
4811 /* I don't know how to handle endianness of sub-units. */
4812 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
4814 for (elem
= 0; elem
< num_elem
; elem
++)
4817 rtx el
= elems
[elem
];
4819 /* Vectors are kept in target memory order. (This is probably
4822 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4823 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4825 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4826 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4827 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4828 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4829 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4832 switch (GET_CODE (el
))
4836 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4838 *vp
++ = INTVAL (el
) >> i
;
4839 /* CONST_INTs are always logically sign-extended. */
4840 for (; i
< elem_bitsize
; i
+= value_bit
)
4841 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
4845 if (GET_MODE (el
) == VOIDmode
)
4847 /* If this triggers, someone should have generated a
4848 CONST_INT instead. */
4849 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
4851 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
4852 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
4853 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
4856 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
4859 /* It shouldn't matter what's done here, so fill it with
4861 for (; i
< elem_bitsize
; i
+= value_bit
)
4866 long tmp
[max_bitsize
/ 32];
4867 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
4869 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
4870 gcc_assert (bitsize
<= elem_bitsize
);
4871 gcc_assert (bitsize
% value_bit
== 0);
4873 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
4876 /* real_to_target produces its result in words affected by
4877 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4878 and use WORDS_BIG_ENDIAN instead; see the documentation
4879 of SUBREG in rtl.texi. */
4880 for (i
= 0; i
< bitsize
; i
+= value_bit
)
4883 if (WORDS_BIG_ENDIAN
)
4884 ibase
= bitsize
- 1 - i
;
4887 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
4890 /* It shouldn't matter what's done here, so fill it with
4892 for (; i
< elem_bitsize
; i
+= value_bit
)
4898 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
4900 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
4901 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
4905 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
4906 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
4907 for (; i
< 2 * HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4909 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
4910 >> (i
- HOST_BITS_PER_WIDE_INT
);
4911 for (; i
< elem_bitsize
; i
+= value_bit
)
4921 /* Now, pick the right byte to start with. */
4922 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4923 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4924 will already have offset 0. */
4925 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
4927 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
4929 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4930 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4931 byte
= (subword_byte
% UNITS_PER_WORD
4932 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4935 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4936 so if it's become negative it will instead be very large.) */
4937 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
4939 /* Convert from bytes to chunks of size value_bit. */
4940 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
4942 /* Re-pack the value. */
4944 if (VECTOR_MODE_P (outermode
))
4946 num_elem
= GET_MODE_NUNITS (outermode
);
4947 result_v
= rtvec_alloc (num_elem
);
4948 elems
= &RTVEC_ELT (result_v
, 0);
4949 outer_submode
= GET_MODE_INNER (outermode
);
4955 outer_submode
= outermode
;
4958 outer_class
= GET_MODE_CLASS (outer_submode
);
4959 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
4961 gcc_assert (elem_bitsize
% value_bit
== 0);
4962 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
4964 for (elem
= 0; elem
< num_elem
; elem
++)
4968 /* Vectors are stored in target memory order. (This is probably
4971 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
4972 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
4974 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
4975 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
4976 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
4977 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
4978 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
4981 switch (outer_class
)
4984 case MODE_PARTIAL_INT
:
4986 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
4989 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
4991 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
4992 for (; i
< elem_bitsize
; i
+= value_bit
)
4993 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
4994 << (i
- HOST_BITS_PER_WIDE_INT
));
4996 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4998 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
4999 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
5000 else if (elem_bitsize
<= 2 * HOST_BITS_PER_WIDE_INT
)
5001 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
5008 case MODE_DECIMAL_FLOAT
:
5011 long tmp
[max_bitsize
/ 32];
5013 /* real_from_target wants its input in words affected by
5014 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5015 and use WORDS_BIG_ENDIAN instead; see the documentation
5016 of SUBREG in rtl.texi. */
5017 for (i
= 0; i
< max_bitsize
/ 32; i
++)
5019 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5022 if (WORDS_BIG_ENDIAN
)
5023 ibase
= elem_bitsize
- 1 - i
;
5026 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
5029 real_from_target (&r
, tmp
, outer_submode
);
5030 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
5042 f
.mode
= outer_submode
;
5045 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5047 f
.data
.low
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5048 for (; i
< elem_bitsize
; i
+= value_bit
)
5049 f
.data
.high
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
5050 << (i
- HOST_BITS_PER_WIDE_INT
));
5052 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
5060 if (VECTOR_MODE_P (outermode
))
5061 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
5066 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5067 Return 0 if no simplifications are possible. */
5069 simplify_subreg (enum machine_mode outermode
, rtx op
,
5070 enum machine_mode innermode
, unsigned int byte
)
5072 /* Little bit of sanity checking. */
5073 gcc_assert (innermode
!= VOIDmode
);
5074 gcc_assert (outermode
!= VOIDmode
);
5075 gcc_assert (innermode
!= BLKmode
);
5076 gcc_assert (outermode
!= BLKmode
);
5078 gcc_assert (GET_MODE (op
) == innermode
5079 || GET_MODE (op
) == VOIDmode
);
5081 gcc_assert ((byte
% GET_MODE_SIZE (outermode
)) == 0);
5082 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5084 if (outermode
== innermode
&& !byte
)
5087 if (CONST_INT_P (op
)
5088 || GET_CODE (op
) == CONST_DOUBLE
5089 || GET_CODE (op
) == CONST_FIXED
5090 || GET_CODE (op
) == CONST_VECTOR
)
5091 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
5093 /* Changing mode twice with SUBREG => just change it once,
5094 or not at all if changing back op starting mode. */
5095 if (GET_CODE (op
) == SUBREG
)
5097 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
5098 int final_offset
= byte
+ SUBREG_BYTE (op
);
5101 if (outermode
== innermostmode
5102 && byte
== 0 && SUBREG_BYTE (op
) == 0)
5103 return SUBREG_REG (op
);
5105 /* The SUBREG_BYTE represents offset, as if the value were stored
5106 in memory. Irritating exception is paradoxical subreg, where
5107 we define SUBREG_BYTE to be 0. On big endian machines, this
5108 value should be negative. For a moment, undo this exception. */
5109 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5111 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
5112 if (WORDS_BIG_ENDIAN
)
5113 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5114 if (BYTES_BIG_ENDIAN
)
5115 final_offset
+= difference
% UNITS_PER_WORD
;
5117 if (SUBREG_BYTE (op
) == 0
5118 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
5120 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
5121 if (WORDS_BIG_ENDIAN
)
5122 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5123 if (BYTES_BIG_ENDIAN
)
5124 final_offset
+= difference
% UNITS_PER_WORD
;
5127 /* See whether resulting subreg will be paradoxical. */
5128 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
5130 /* In nonparadoxical subregs we can't handle negative offsets. */
5131 if (final_offset
< 0)
5133 /* Bail out in case resulting subreg would be incorrect. */
5134 if (final_offset
% GET_MODE_SIZE (outermode
)
5135 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
5141 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
5143 /* In paradoxical subreg, see if we are still looking on lower part.
5144 If so, our SUBREG_BYTE will be 0. */
5145 if (WORDS_BIG_ENDIAN
)
5146 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5147 if (BYTES_BIG_ENDIAN
)
5148 offset
+= difference
% UNITS_PER_WORD
;
5149 if (offset
== final_offset
)
5155 /* Recurse for further possible simplifications. */
5156 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
5160 if (validate_subreg (outermode
, innermostmode
,
5161 SUBREG_REG (op
), final_offset
))
5163 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
5164 if (SUBREG_PROMOTED_VAR_P (op
)
5165 && SUBREG_PROMOTED_UNSIGNED_P (op
) >= 0
5166 && GET_MODE_CLASS (outermode
) == MODE_INT
5167 && IN_RANGE (GET_MODE_SIZE (outermode
),
5168 GET_MODE_SIZE (innermode
),
5169 GET_MODE_SIZE (innermostmode
))
5170 && subreg_lowpart_p (newx
))
5172 SUBREG_PROMOTED_VAR_P (newx
) = 1;
5173 SUBREG_PROMOTED_UNSIGNED_SET
5174 (newx
, SUBREG_PROMOTED_UNSIGNED_P (op
));
5181 /* Merge implicit and explicit truncations. */
5183 if (GET_CODE (op
) == TRUNCATE
5184 && GET_MODE_SIZE (outermode
) < GET_MODE_SIZE (innermode
)
5185 && subreg_lowpart_offset (outermode
, innermode
) == byte
)
5186 return simplify_gen_unary (TRUNCATE
, outermode
, XEXP (op
, 0),
5187 GET_MODE (XEXP (op
, 0)));
5189 /* SUBREG of a hard register => just change the register number
5190 and/or mode. If the hard register is not valid in that mode,
5191 suppress this simplification. If the hard register is the stack,
5192 frame, or argument pointer, leave this as a SUBREG. */
5194 if (REG_P (op
) && HARD_REGISTER_P (op
))
5196 unsigned int regno
, final_regno
;
5199 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
5200 if (HARD_REGISTER_NUM_P (final_regno
))
5203 int final_offset
= byte
;
5205 /* Adjust offset for paradoxical subregs. */
5207 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5209 int difference
= (GET_MODE_SIZE (innermode
)
5210 - GET_MODE_SIZE (outermode
));
5211 if (WORDS_BIG_ENDIAN
)
5212 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5213 if (BYTES_BIG_ENDIAN
)
5214 final_offset
+= difference
% UNITS_PER_WORD
;
5217 x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, final_offset
);
5219 /* Propagate original regno. We don't have any way to specify
5220 the offset inside original regno, so do so only for lowpart.
5221 The information is used only by alias analysis that can not
5222 grog partial register anyway. */
5224 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
5225 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
5230 /* If we have a SUBREG of a register that we are replacing and we are
5231 replacing it with a MEM, make a new MEM and try replacing the
5232 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5233 or if we would be widening it. */
5236 && ! mode_dependent_address_p (XEXP (op
, 0))
5237 /* Allow splitting of volatile memory references in case we don't
5238 have instruction to move the whole thing. */
5239 && (! MEM_VOLATILE_P (op
)
5240 || ! have_insn_for (SET
, innermode
))
5241 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
5242 return adjust_address_nv (op
, outermode
, byte
);
5244 /* Handle complex values represented as CONCAT
5245 of real and imaginary part. */
5246 if (GET_CODE (op
) == CONCAT
)
5248 unsigned int part_size
, final_offset
;
5251 part_size
= GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)));
5252 if (byte
< part_size
)
5254 part
= XEXP (op
, 0);
5255 final_offset
= byte
;
5259 part
= XEXP (op
, 1);
5260 final_offset
= byte
- part_size
;
5263 if (final_offset
+ GET_MODE_SIZE (outermode
) > part_size
)
5266 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
5269 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
5270 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
5274 /* Optimize SUBREG truncations of zero and sign extended values. */
5275 if ((GET_CODE (op
) == ZERO_EXTEND
5276 || GET_CODE (op
) == SIGN_EXTEND
)
5277 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
))
5279 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
5281 /* If we're requesting the lowpart of a zero or sign extension,
5282 there are three possibilities. If the outermode is the same
5283 as the origmode, we can omit both the extension and the subreg.
5284 If the outermode is not larger than the origmode, we can apply
5285 the truncation without the extension. Finally, if the outermode
5286 is larger than the origmode, but both are integer modes, we
5287 can just extend to the appropriate mode. */
5290 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
5291 if (outermode
== origmode
)
5292 return XEXP (op
, 0);
5293 if (GET_MODE_BITSIZE (outermode
) <= GET_MODE_BITSIZE (origmode
))
5294 return simplify_gen_subreg (outermode
, XEXP (op
, 0), origmode
,
5295 subreg_lowpart_offset (outermode
,
5297 if (SCALAR_INT_MODE_P (outermode
))
5298 return simplify_gen_unary (GET_CODE (op
), outermode
,
5299 XEXP (op
, 0), origmode
);
5302 /* A SUBREG resulting from a zero extension may fold to zero if
5303 it extracts higher bits that the ZERO_EXTEND's source bits. */
5304 if (GET_CODE (op
) == ZERO_EXTEND
5305 && bitpos
>= GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0))))
5306 return CONST0_RTX (outermode
);
5309 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
5310 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
5311 the outer subreg is effectively a truncation to the original mode. */
5312 if ((GET_CODE (op
) == LSHIFTRT
5313 || GET_CODE (op
) == ASHIFTRT
)
5314 && SCALAR_INT_MODE_P (outermode
)
5315 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5316 to avoid the possibility that an outer LSHIFTRT shifts by more
5317 than the sign extension's sign_bit_copies and introduces zeros
5318 into the high bits of the result. */
5319 && (2 * GET_MODE_BITSIZE (outermode
)) <= GET_MODE_BITSIZE (innermode
)
5320 && CONST_INT_P (XEXP (op
, 1))
5321 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
5322 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
5323 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
5324 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
5325 return simplify_gen_binary (ASHIFTRT
, outermode
,
5326 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
5328 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
5329 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
5330 the outer subreg is effectively a truncation to the original mode. */
5331 if ((GET_CODE (op
) == LSHIFTRT
5332 || GET_CODE (op
) == ASHIFTRT
)
5333 && SCALAR_INT_MODE_P (outermode
)
5334 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
5335 && CONST_INT_P (XEXP (op
, 1))
5336 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
5337 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
5338 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
5339 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
5340 return simplify_gen_binary (LSHIFTRT
, outermode
,
5341 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
5343 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
5344 to (ashift:QI (x:QI) C), where C is a suitable small constant and
5345 the outer subreg is effectively a truncation to the original mode. */
5346 if (GET_CODE (op
) == ASHIFT
5347 && SCALAR_INT_MODE_P (outermode
)
5348 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
5349 && CONST_INT_P (XEXP (op
, 1))
5350 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
5351 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
5352 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
5353 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
5354 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
5355 return simplify_gen_binary (ASHIFT
, outermode
,
5356 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
5358 /* Recognize a word extraction from a multi-word subreg. */
5359 if ((GET_CODE (op
) == LSHIFTRT
5360 || GET_CODE (op
) == ASHIFTRT
)
5361 && SCALAR_INT_MODE_P (outermode
)
5362 && GET_MODE_BITSIZE (outermode
) >= BITS_PER_WORD
5363 && GET_MODE_BITSIZE (innermode
) >= (2 * GET_MODE_BITSIZE (outermode
))
5364 && CONST_INT_P (XEXP (op
, 1))
5365 && (INTVAL (XEXP (op
, 1)) & (GET_MODE_BITSIZE (outermode
) - 1)) == 0
5366 && INTVAL (XEXP (op
, 1)) >= 0
5367 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (innermode
)
5368 && byte
== subreg_lowpart_offset (outermode
, innermode
))
5370 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
5371 return simplify_gen_subreg (outermode
, XEXP (op
, 0), innermode
,
5373 ? byte
- shifted_bytes
5374 : byte
+ shifted_bytes
));
5380 /* Make a SUBREG operation or equivalent if it folds. */
5383 simplify_gen_subreg (enum machine_mode outermode
, rtx op
,
5384 enum machine_mode innermode
, unsigned int byte
)
5388 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
5392 if (GET_CODE (op
) == SUBREG
5393 || GET_CODE (op
) == CONCAT
5394 || GET_MODE (op
) == VOIDmode
)
5397 if (validate_subreg (outermode
, innermode
, op
, byte
))
5398 return gen_rtx_SUBREG (outermode
, op
, byte
);
5403 /* Simplify X, an rtx expression.
5405 Return the simplified expression or NULL if no simplifications
5408 This is the preferred entry point into the simplification routines;
5409 however, we still allow passes to call the more specific routines.
5411 Right now GCC has three (yes, three) major bodies of RTL simplification
5412 code that need to be unified.
5414 1. fold_rtx in cse.c. This code uses various CSE specific
5415 information to aid in RTL simplification.
5417 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5418 it uses combine specific information to aid in RTL
5421 3. The routines in this file.
5424 Long term we want to only have one body of simplification code; to
5425 get to that state I recommend the following steps:
5427 1. Pore over fold_rtx & simplify_rtx and move any simplifications
5428 which are not pass dependent state into these routines.
5430 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5431 use this routine whenever possible.
5433 3. Allow for pass dependent state to be provided to these
5434 routines and add simplifications based on the pass dependent
5435 state. Remove code from cse.c & combine.c that becomes
5438 It will take time, but ultimately the compiler will be easier to
5439 maintain and improve. It's totally silly that when we add a
5440 simplification that it needs to be added to 4 places (3 for RTL
5441 simplification and 1 for tree simplification). */
5444 simplify_rtx (const_rtx x
)
5446 const enum rtx_code code
= GET_CODE (x
);
5447 const enum machine_mode mode
= GET_MODE (x
);
5449 switch (GET_RTX_CLASS (code
))
5452 return simplify_unary_operation (code
, mode
,
5453 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
5454 case RTX_COMM_ARITH
:
5455 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
5456 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
5458 /* Fall through.... */
5461 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
5464 case RTX_BITFIELD_OPS
:
5465 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
5466 XEXP (x
, 0), XEXP (x
, 1),
5470 case RTX_COMM_COMPARE
:
5471 return simplify_relational_operation (code
, mode
,
5472 ((GET_MODE (XEXP (x
, 0))
5474 ? GET_MODE (XEXP (x
, 0))
5475 : GET_MODE (XEXP (x
, 1))),
5481 return simplify_subreg (mode
, SUBREG_REG (x
),
5482 GET_MODE (SUBREG_REG (x
)),
5489 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5490 if (GET_CODE (XEXP (x
, 0)) == HIGH
5491 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))