/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
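
/* For example, if LOW has its most significant bit set, it reads as a
   negative signed HOST_WIDE_INT, so HWI_SIGN_EXTEND yields an all-ones
   high word; otherwise it yields zero.  */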
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
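
/* For instance, negating QImode -128 yields +128, which does not fit in
   QImode; gen_int_mode truncates the result back into the mode, so the
   CONST_INT returned here is always canonical for MODE.  */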
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
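
/* For example, in SImode this accepts exactly the constant whose low 32
   bits are 0x80000000: only the sign bit of the mode is set.  */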
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
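
/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, reg) folds
   nothing but canonicalizes the operand order, producing
   (plus:SI reg (const_int 1)) rather than the mirror image.  */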
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
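
/* Callers such as simplify_unary_operation and simplify_binary_operation
   use the routine above to obtain "true" operands, so that constants that
   were spilled to the constant pool still participate in folding.  */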
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && (!MEM_OFFSET (x)
          || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          if (MEM_OFFSET (x))
            offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with FN (X', DATA), where X'
   is an expression in X that is equal to OLD_RTX.  Canonicalize and
   simplify the result.

   If FN is null, assume FN (X', DATA) == copy_rtx (DATA).  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  /* If X is OLD_RTX, return FN (X, DATA), or copy_rtx (DATA) with a null
     FN.  Otherwise, if this is an expression, try to build a new
     expression, substituting recursively.  If we can't do anything,
     return our input.  */

  if (rtx_equal_p (x, old_rtx))
    {
      if (fn)
        return fn (x, data);
      else
        return copy_rtx ((rtx) data);
    }

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
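
/* For example, replacing (reg:SI 60) with (const_int 2) in
   (plus:SI (reg:SI 60) (const_int 3)) does not merely substitute; the
   result folds all the way to (const_int 5).  */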
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }
      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
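
      /* De Morgan example: (not (and X Y)) becomes (ior (not X) (not Y));
         any doubled NOT this creates is then cancelled recursively by the
         (not (not X)) == X rule at the top of the NOT case.  */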
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
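
      /* Example for the LT rewrite above: with STORE_FLAG_VALUE == 1,
         (neg:SI (lt:SI x (const_int 0))) becomes
         (ashiftrt:SI x (const_int 31)); shifting by isize - 1 broadcasts
         the sign bit, giving -1 when x < 0 and 0 otherwise.  */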
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                            0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /*  (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /*  (float_extend (float_extend x)) is (float_extend x)

          (float_extend (float x)) is (float x) assuming that double
          rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
        case US_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
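
/* For example, "(x + 1) + y" is canonicalized to "(x + y) + 1", and when
   the second operand is itself a constant the inner operation folds, so
   "(x + 1) + 2" collapses to "x + 3".  */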
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
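
/* For example, simplify_binary_operation (PLUS, SImode, x, const0_rtx)
   returns X for integer modes, and folding two CONST_INT operands such as
   2 + 3 yields (const_int 5) outright.  */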
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
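      /* Worked example (illustrative): for x * 8 in SImode,
         exact_log2 (8) gives val = 3, so the product above is rewritten
         as (ashift x (const_int 3)), i.e. x << 3.  */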
    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (CONST_INT_P (trueop1)
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }
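      /* Worked bit-level example (illustrative): with C1 = 0x0f and
         C2 = 0x05, (C1 & ~C2) = 0x0a differs from C1, so
         (X & 0x0f) | 0x05 is rewritten as (X & 0x0a) | 0x05 -- bits of
         C1 that C2 forces on anyway are dropped from the AND mask.  */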
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
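      /* Worked example (illustrative, 32-bit SImode): in
         (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
         the counts satisfy 24 + 8 == GET_MODE_BITSIZE (SImode), so the
         pair is recognized as (rotate x (const_int 24)).  */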
      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                      (AND, mode, XEXP (op0, 0),
                                       GEN_INT (INTVAL (XEXP (op0, 1))
                                                & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (CONST_INT_P (trueop1)
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
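      /* Why this is safe (illustrative): in SImode arithmetic modulo
         2^32, x ^ 0x80000000 and x + 0x80000000 both just flip the top
         bit, because no carry can propagate out of the most significant
         position.  Canonicalizing to PLUS lets the plus/minus machinery
         fold the constant further.  */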
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode,
                                                          op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
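      /* Worked example (illustrative): in
         (and (zero_extend:SI x:QI) (const_int 0x3c)) the constant has
         no bits outside QImode's 0xff mask, so it becomes
         (zero_extend:SI (and:QI x 0x3c)) and the AND is done in the
         narrower mode.  */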
      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          enum machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0), op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                         == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
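      /* Worked example (illustrative): take M = 0x0f, so M + 1 is a
         power of two, and N = 0xff.  Since (N & M) == M, the low four
         bits of (a & 0xff) equal those of a, and addition modulo 16
         depends only on those bits, so ((a & 0xff) + b) & 0x0f
         simplifies to (a + b) & 0x0f.  */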
      /* (and X (ior (not X) Y)) -> (and X Y).  */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && op0 == XEXP (XEXP (op1, 0), 0))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y).  */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && op1 == XEXP (XEXP (op0, 0), 0))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
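      /* Worked example (illustrative): an unsigned x / 16 has
         exact_log2 (16) == 4 and becomes (lshiftrt x (const_int 4)).
         The logical shift is only correct for UDIV; signed division
         must round towards zero and is not rewritten here.  */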
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0.  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;
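      /* Worked example (illustrative): for an unsigned x % 8, the
         remainder is just the low bits, so the expression becomes
         (and x (const_int 7)).  This requires the divisor to be an
         exact power of two.  */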
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT) width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_BITSIZE (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;
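      /* Worked example (illustrative, assuming 32-bit SImode where
         CLZ_DEFINED_VALUE_AT_ZERO yields 32): clz returns a value with
         bit 5 set only for a zero input, so (lshiftrt (clz x) 5) is 1
         exactly when x == 0 and can be folded to (eq x 0).  */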
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract scalar element from a vector using chain of
             nested VEC_SELECT expressions.  When input operand is a memory
             operand, this operation can be simplified to a simple scalar
             load from an offseted memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              enum machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  enum machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts00)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }

          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }

      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      return 0;
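      /* Worked example (illustrative): for
         (vec_select:SI (vec_concat:V2SI (reg:SI a) (reg:SI b))
                        (parallel [(const_int 1)]))
         the byte offset is 1 * 4 = 4, which lies beyond the 4-byte
         first operand, so the walk above descends into b with offset 0
         and returns b directly.  */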
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_INT_P (trueop0)
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_INT_P (trueop1)
                || GET_CODE (trueop1) == CONST_DOUBLE))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
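/* Worked example (illustrative): (vec_concat:V2SI (const_int 1)
   (const_int 2)) folds to a CONST_VECTOR holding [1, 2]; a vector
   operand contributes its elements one by one, with the second
   operand's elements placed after the first operand's.  */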
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
          || GET_CODE (op0) == CONST_DOUBLE
          || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return 0;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
      && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
        l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
        l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */
        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
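  /* Worked example (illustrative, 32-bit HOST_WIDE_INT, 64-bit DImode):
     the constant 0x100000001 is carried as the pair low = 0x00000001,
     high = 0x00000001; helpers like add_double operate on such
     (low, high) pairs, and immed_double_const rebuilds a CONST_INT or
     CONST_DOUBLE from the folded result.  */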
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
          arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

          arg0s = arg0;
          if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg0s |= ((HOST_WIDE_INT) (-1) << width);

          arg1s = arg1;
          if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }

      /* Compute the value of the arithmetic.  */

      switch (code)
        {
        case PLUS:
          val = arg0s + arg1s;
          break;

        case MINUS:
          val = arg0s - arg1s;
          break;

        case MULT:
          val = arg0s * arg1s;
          break;

        case DIV:
          if (arg1s == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s / arg1s;
          break;

        case MOD:
          if (arg1s == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s % arg1s;
          break;

        case UDIV:
          if (arg1 == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
          break;

        case UMOD:
          if (arg1 == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
          break;

        case AND:
          val = arg0 & arg1;
          break;

        case IOR:
          val = arg0 | arg1;
          break;

        case XOR:
          val = arg0 ^ arg1;
          break;

        case LSHIFTRT:
        case ASHIFT:
        case ASHIFTRT:
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((HOST_WIDE_INT) -1) << (width - arg1);
          break;

        case ROTATERT:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
          break;

        case ROTATE:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
          break;

        case COMPARE:
          /* Do nothing here.  */
          return 0;

        case SMIN:
          val = arg0s <= arg1s ? arg0s : arg1s;
          break;

        case UMIN:
          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SMAX:
          val = arg0s > arg1s ? arg0s : arg1s;
          break;

        case UMAX:
          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
        case SS_MULT:
        case US_MULT:
        case SS_DIV:
        case US_DIV:
        case SS_ASHIFT:
        case US_ASHIFT:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
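/* Worked example (illustrative, QImode): folding 200 + 100 reduces the
   operands to their 8-bit signed values -56 and 100, giving val = 44 --
   that is, 300 mod 256 -- and gen_int_mode returns the canonical
   CONST_INT for that value in QImode.  */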
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              canonicalized |= this_neg;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case CONST_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
            continue;

          canonicalized = 1;
          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode,
                                                     tem_lhs, tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }

      /* If nothing changed, fail.  */
      if (!canonicalized)
        return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
                               cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE)
      && (CONST_INT_P (XEXP (op0, 1))
          || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}
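/* Worked example (illustrative): (geu (plus a 4) 4) matches the
   PLUS-with-constant rule above and becomes (ltu a -4), i.e. the
   constant is negated and the condition reversed.  */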
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
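/* Usage note (illustrative): a caller that has proved op0 == op1 asks
   for comparison_result (code, CMP_EQ); EQ, LE, GE, LEU and GEU then
   yield const_true_rtx, while NE, LT, GT, LTU and GTU yield
   const0_rtx.  */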
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
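  /* Worked example (illustrative): comparing the constants -1 and 1
     gives cr = CMP_LT | CMP_GTU, since -1 < 1 as a signed value while
     its zero-extended image is the largest unsigned value.  Hence
     (lt -1 1) folds to true and (ltu -1 1) folds to false.  */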
  /* Optimize comparisons with upper and lower bounds.  */
  if (SCALAR_INT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
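  /* Worked example (illustrative, not from the original source): if
     TRUEOP0 is (zero_extend:SI (reg:QI)), nonzero_bits reports at most
     0xff, so the reduced range is [0, 255].  An unsigned comparison
     such as (ltu trueop0 (const_int 300)) then folds to const_true_rtx
     above, because VAL is greater than MMAX.  */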
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (INTVAL (inner_const)
				  & ((HOST_WIDE_INT) 1 << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case GE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GT:
		case LE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
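  /* Illustrative example (not part of the original source): for
     (ne (ior:SI (reg:SI) (const_int 4)) (const_int 0)), the IOR
     guarantees that bit 2 of the first operand is set, so the value is
     known to be nonzero and the comparison folds to const_true_rtx no
     matter what the register holds.  */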
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
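/* Illustrative example (not part of the original source): when NaNs are
   not honored for the mode (e.g. under fast-math style options), the
   comparison (ge:SF (abs:SF (reg:SF)) (const_double:SF 0.0)) folds to
   const_true_rtx above, since |x| >= 0.0 holds for every non-NaN x.  */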
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;
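      /* Worked example (illustrative, not from the original source):
	 (zero_extract:SI (const_int 0xb4) (const_int 4) (const_int 2))
	 with !BITS_BIG_ENDIAN shifts 0xb4 right by 2 to get 0x2d, then
	 masks with 0xf, yielding (const_int 13).  A sign_extract of the
	 same field ORs in the high bits because bit 3 of 0xd is set,
	 yielding (const_int -3).  */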
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
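      /* Illustrative example (not part of the original source), assuming
	 STORE_FLAG_VALUE == 1: (if_then_else (lt (reg A) (reg B))
	 (const_int 1) (const_int 0)) collapses to the store-flag form
	 (lt (reg A) (reg B)) via simplify_gen_relational above; with the
	 two constants swapped, the reversed comparison (ge ...) is used
	 instead.  */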
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
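/* Illustrative example (not part of the original source): for a V4SI
   VEC_MERGE of two CONST_VECTORs with OP2 = (const_int 5), bits 0 and 2
   of the mask select elements 0 and 2 from OP0 and elements 1 and 3
   from OP1, and the loop above builds the merged CONST_VECTOR
   directly.  */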
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
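  /* Worked example (illustrative, not from the original source): taking
     (subreg:HI (x:SI) 2) on a big-endian target with 4-byte words gives
     ibyte = 4 - 2 - 2 = 0, so BYTE is renumbered from 2 to 0: on such a
     target the subreg at byte offset 2 holds the least-significant
     half, which lives at the start of the little-endian VALUE array.  */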
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT) (*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT) (*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT) (*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT) (*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
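/* Usage example (illustrative, not from the original source):
   simplify_immed_subreg (QImode, GEN_INT (0x1234), HImode, 0) unpacks
   0x1234 into the little-endian VALUE array {0x34, 0x12}, renumbers
   BYTE for the target's endianness, and repacks a single byte: it
   yields (const_int 0x34) when the selected byte is the least
   significant one.  */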
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);
      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
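  /* Illustrative example (not part of the original source):
     (subreg:QI (subreg:HI (reg:SI R) 0) 0) goes through the branch
     above and recurses as simplify_subreg (QImode, (reg:SI R), SImode,
     0), so the two mode changes collapse into one narrower
     reference.  */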
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
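  /* Illustrative example (not part of the original source): for a
     complex value (concat:SC (reg:SF R) (reg:SF I)) on a target with
     4-byte SFmode, (subreg:SF ... 4) lands in the second part, so PART
     is the imaginary register and FINAL_OFFSET becomes 0, letting the
     recursive call above return (reg:SF I) directly.  */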
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
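  /* Illustrative examples (not part of the original source): requesting
     the QImode lowpart of (zero_extend:SI (reg:QI R)) returns
     (reg:QI R) itself (outermode == origmode), while requesting the
     HImode lowpart returns (zero_extend:HI (reg:QI R)) via
     simplify_gen_unary, since HImode is wider than the QImode
     source.  */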
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  return NULL_RTX;
}
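/* Illustrative example (not part of the original source), for a 32-bit
   little-endian target: (subreg:SI (lshiftrt:DI (reg:DI R)
   (const_int 32)) 0) is the lowpart of the shifted value, so
   SHIFTED_BYTES is 4 and the whole expression becomes
   (subreg:SI (reg:DI R) 4), i.e. the high word extracted without any
   shift.  */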
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}