/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"

/* Simplification and canonicalization of RTL.  */
/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.  */
#define FIXED_BASE_PLUS_P(X)                                            \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx            \
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])        \
   || (X) == virtual_stack_vars_rtx                                     \
   || (X) == virtual_incoming_args_rtx                                  \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT      \
       && (XEXP (X, 0) == frame_pointer_rtx                             \
           || XEXP (X, 0) == hard_frame_pointer_rtx                     \
           || ((X) == arg_pointer_rtx                                   \
               && fixed_regs[ARG_POINTER_REGNUM])                       \
           || XEXP (X, 0) == virtual_stack_vars_rtx                     \
           || XEXP (X, 0) == virtual_incoming_args_rtx))                \
   || GET_CODE (X) == ADDRESSOF)
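/* Illustrative note (added comment, not from the original source):
   FIXED_BASE_PLUS_P accepts, e.g., (plus (reg frame_pointer) (const_int 8)),
   but not (plus (reg stack_pointer) (const_int 8)); the stack pointer is
   only accepted by NONZERO_BASE_PLUS_P below.  */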
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */
#define NONZERO_BASE_PLUS_P(X)                                           \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx            \
   || (X) == virtual_stack_vars_rtx                                     \
   || (X) == virtual_incoming_args_rtx                                  \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT      \
       && (XEXP (X, 0) == frame_pointer_rtx                             \
           || XEXP (X, 0) == hard_frame_pointer_rtx                     \
           || ((X) == arg_pointer_rtx                                   \
               && fixed_regs[ARG_POINTER_REGNUM])                       \
           || XEXP (X, 0) == virtual_stack_vars_rtx                     \
           || XEXP (X, 0) == virtual_incoming_args_rtx))                \
   || (X) == stack_pointer_rtx                                          \
   || (X) == virtual_stack_dynamic_rtx                                  \
   || (X) == virtual_outgoing_args_rtx                                  \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT      \
       && (XEXP (X, 0) == stack_pointer_rtx                             \
           || XEXP (X, 0) == virtual_stack_dynamic_rtx                  \
           || XEXP (X, 0) == virtual_outgoing_args_rtx))                \
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
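/* Illustrative example (added comment): HWI_SIGN_EXTEND applied to a low
   word whose sign bit is set yields (HOST_WIDE_INT) -1, and 0 otherwise,
   i.e. the high word a (low, high) pair would need if LOW were treated
   as a signed wide int.  */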
static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
						     const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
					enum machine_mode, rtx,
					rtx, int));
static void check_fold_consts PARAMS ((PTR));
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
static void simplify_unary_real PARAMS ((PTR));
static void simplify_binary_real PARAMS ((PTR));
static void simplify_binary_is2orm1 PARAMS ((PTR));
#endif
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode));
}
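/* Worked example (added comment): negating the QImode CONST_INT -128 gives
   +128, which does not fit in QImode; trunc_int_for_mode wraps it back to
   -128, which is why the truncation above is needed.  */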
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    return simplify_plus_minus (code, mode, op0, op1, 1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
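/* Usage sketch (added comment, illustrative only): callers use this instead
   of gen_rtx_fmt_ee so that trivial folds happen immediately; e.g.
   simplify_gen_binary (PLUS, SImode, x, const0_rtx) normally returns X
   rather than building (plus X (const_int 0)).  */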
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
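/* Example (added comment): given a MEM of a SYMBOL_REF that addresses a
   constant-pool entry holding a DFmode CONST_DOUBLE, this returns the
   CONST_DOUBLE itself; if the MEM is accessed in a different mode,
   simplify_subreg converts it, and the original MEM is returned if that
   conversion fails.  */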
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
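/* Canonicalization example (added comment): if the comparison does not fold,
   the operands are reordered so the constant comes second, e.g.
   (lt (const_int 4) (reg)) becomes (gt (reg) (const_int 4)) via
   swap_condition before the rtx is built.  */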
245 /* Replace all occurrences of OLD in X with NEW and try to simplify the
246 resulting RTX. Return a new RTX which is as simplified as possible. */
249 simplify_replace_rtx (x
, old
, new)
254 enum rtx_code code
= GET_CODE (x
);
255 enum machine_mode mode
= GET_MODE (x
);
257 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
258 to build a new expression substituting recursively. If we can't do
259 anything, return our input. */
264 switch (GET_RTX_CLASS (code
))
268 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
269 rtx op
= (XEXP (x
, 0) == old
270 ? new : simplify_replace_rtx (XEXP (x
, 0), old
, new));
272 return simplify_gen_unary (code
, mode
, op
, op_mode
);
278 simplify_gen_binary (code
, mode
,
279 simplify_replace_rtx (XEXP (x
, 0), old
, new),
280 simplify_replace_rtx (XEXP (x
, 1), old
, new));
283 enum machine_mode op_mode
= (GET_MODE (XEXP (x
, 0)) != VOIDmode
284 ? GET_MODE (XEXP (x
, 0))
285 : GET_MODE (XEXP (x
, 1)));
286 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
287 rtx op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
290 simplify_gen_relational (code
, mode
,
293 : GET_MODE (op0
) != VOIDmode
302 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
303 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
306 simplify_gen_ternary (code
, mode
,
311 simplify_replace_rtx (XEXP (x
, 1), old
, new),
312 simplify_replace_rtx (XEXP (x
, 2), old
, new));
316 /* The only case we try to handle is a SUBREG. */
320 exp
= simplify_gen_subreg (GET_MODE (x
),
321 simplify_replace_rtx (SUBREG_REG (x
),
323 GET_MODE (SUBREG_REG (x
)),
331 if (GET_CODE (x
) == MEM
)
333 replace_equiv_address_nv (x
,
334 simplify_replace_rtx (XEXP (x
, 0),
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Subroutine of simplify_unary_operation, called via do_float_handler.
   Handles simplification of unary ops on floating point values.  */
struct simplify_unary_real_args
{
  rtx operand;
  rtx result;
  enum machine_mode mode;
  enum rtx_code code;
  bool want_integer;
};

#define REAL_VALUE_ABS(d_) \
   (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))
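/* Note (added comment): REAL_VALUE_ABS simply negates a negative value,
   e.g. -2.5 becomes 2.5, and leaves non-negative values alone; it is used
   by the ABS case in simplify_unary_real below.  */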
357 simplify_unary_real (p
)
362 struct simplify_unary_real_args
*args
=
363 (struct simplify_unary_real_args
*) p
;
365 REAL_VALUE_FROM_CONST_DOUBLE (d
, args
->operand
);
367 if (args
->want_integer
)
373 case FIX
: i
= REAL_VALUE_FIX (d
); break;
374 case UNSIGNED_FIX
: i
= REAL_VALUE_UNSIGNED_FIX (d
); break;
378 args
->result
= GEN_INT (trunc_int_for_mode (i
, args
->mode
));
385 /* We don't attempt to optimize this. */
389 case ABS
: d
= REAL_VALUE_ABS (d
); break;
390 case NEG
: d
= REAL_VALUE_NEGATE (d
); break;
391 case FLOAT_TRUNCATE
: d
= real_value_truncate (args
->mode
, d
); break;
392 case FLOAT_EXTEND
: /* All this does is change the mode. */ break;
393 case FIX
: d
= REAL_VALUE_RNDZINT (d
); break;
394 case UNSIGNED_FIX
: d
= REAL_VALUE_UNSIGNED_RNDZINT (d
); break;
398 args
->result
= CONST_DOUBLE_FROM_REAL_VALUE (d
, args
->mode
);
403 /* Try to simplify a unary operation CODE whose output mode is to be
404 MODE with input operand OP whose mode was originally OP_MODE.
405 Return zero if no simplification can be made. */
407 simplify_unary_operation (code
, mode
, op
, op_mode
)
409 enum machine_mode mode
;
411 enum machine_mode op_mode
;
413 unsigned int width
= GET_MODE_BITSIZE (mode
);
414 rtx trueop
= avoid_constant_pool_reference (op
);
416 /* The order of these tests is critical so that, for example, we don't
417 check the wrong mode (input vs. output) for a conversion operation,
418 such as FIX. At some point, this should be simplified. */
420 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
422 if (code
== FLOAT
&& GET_MODE (trueop
) == VOIDmode
423 && (GET_CODE (trueop
) == CONST_DOUBLE
|| GET_CODE (trueop
) == CONST_INT
))
425 HOST_WIDE_INT hv
, lv
;
428 if (GET_CODE (trueop
) == CONST_INT
)
429 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
431 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
433 #ifdef REAL_ARITHMETIC
434 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
439 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
440 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
441 d
+= (double) (unsigned HOST_WIDE_INT
) (~ lv
);
447 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
448 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
449 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
451 #endif /* REAL_ARITHMETIC */
452 d
= real_value_truncate (mode
, d
);
453 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
455 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (trueop
) == VOIDmode
456 && (GET_CODE (trueop
) == CONST_DOUBLE
457 || GET_CODE (trueop
) == CONST_INT
))
459 HOST_WIDE_INT hv
, lv
;
462 if (GET_CODE (trueop
) == CONST_INT
)
463 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
465 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
467 if (op_mode
== VOIDmode
)
469 /* We don't know how to interpret negative-looking numbers in
470 this case, so don't try to fold those. */
474 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
477 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
479 #ifdef REAL_ARITHMETIC
480 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
483 d
= (double) (unsigned HOST_WIDE_INT
) hv
;
484 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
485 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
486 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
487 #endif /* REAL_ARITHMETIC */
488 d
= real_value_truncate (mode
, d
);
489 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
493 if (GET_CODE (trueop
) == CONST_INT
494 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
496 HOST_WIDE_INT arg0
= INTVAL (trueop
);
510 val
= (arg0
>= 0 ? arg0
: - arg0
);
514 /* Don't use ffs here. Instead, get low order bit and then its
515 number. If arg0 is zero, this will return 0, as desired. */
516 arg0
&= GET_MODE_MASK (mode
);
517 val
= exact_log2 (arg0
& (- arg0
)) + 1;
525 /* When zero-extending a CONST_INT, we need to know its
527 if (op_mode
== VOIDmode
)
529 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
531 /* If we were really extending the mode,
532 we would have to distinguish between zero-extension
533 and sign-extension. */
534 if (width
!= GET_MODE_BITSIZE (op_mode
))
538 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
539 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
545 if (op_mode
== VOIDmode
)
547 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
549 /* If we were really extending the mode,
550 we would have to distinguish between zero-extension
551 and sign-extension. */
552 if (width
!= GET_MODE_BITSIZE (op_mode
))
556 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
559 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
561 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
562 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
579 val
= trunc_int_for_mode (val
, mode
);
581 return GEN_INT (val
);
584 /* We can do some operations on integer CONST_DOUBLEs. Also allow
585 for a DImode operation on a CONST_INT. */
586 else if (GET_MODE (trueop
) == VOIDmode
587 && width
<= HOST_BITS_PER_WIDE_INT
* 2
588 && (GET_CODE (trueop
) == CONST_DOUBLE
589 || GET_CODE (trueop
) == CONST_INT
))
591 unsigned HOST_WIDE_INT l1
, lv
;
592 HOST_WIDE_INT h1
, hv
;
594 if (GET_CODE (trueop
) == CONST_DOUBLE
)
595 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
597 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
607 neg_double (l1
, h1
, &lv
, &hv
);
612 neg_double (l1
, h1
, &lv
, &hv
);
620 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
622 lv
= exact_log2 (l1
& (-l1
)) + 1;
626 /* This is just a change-of-mode, so do nothing. */
631 if (op_mode
== VOIDmode
)
634 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
638 lv
= l1
& GET_MODE_MASK (op_mode
);
642 if (op_mode
== VOIDmode
643 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
647 lv
= l1
& GET_MODE_MASK (op_mode
);
648 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
649 && (lv
& ((HOST_WIDE_INT
) 1
650 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
651 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
653 hv
= HWI_SIGN_EXTEND (lv
);
664 return immed_double_const (lv
, hv
, mode
);
667 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
668 else if (GET_CODE (trueop
) == CONST_DOUBLE
669 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
671 struct simplify_unary_real_args args
;
672 args
.operand
= trueop
;
675 args
.want_integer
= false;
677 if (do_float_handler (simplify_unary_real
, (PTR
) &args
))
683 else if (GET_CODE (trueop
) == CONST_DOUBLE
684 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
685 && GET_MODE_CLASS (mode
) == MODE_INT
686 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
688 struct simplify_unary_real_args args
;
689 args
.operand
= trueop
;
692 args
.want_integer
= true;
694 if (do_float_handler (simplify_unary_real
, (PTR
) &args
))
700 /* This was formerly used only for non-IEEE float.
701 eggert@twinsun.com says it is safe for IEEE also. */
704 enum rtx_code reversed
;
705 /* There are some simplifications we can do even if the operands
710 /* (not (not X)) == X. */
711 if (GET_CODE (op
) == NOT
)
714 /* (not (eq X Y)) == (ne X Y), etc. */
715 if (mode
== BImode
&& GET_RTX_CLASS (GET_CODE (op
)) == '<'
716 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
718 return gen_rtx_fmt_ee (reversed
,
719 op_mode
, XEXP (op
, 0), XEXP (op
, 1));
723 /* (neg (neg X)) == X. */
724 if (GET_CODE (op
) == NEG
)
729 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
730 becomes just the MINUS if its mode is MODE. This allows
731 folding switch statements on machines using casesi (such as
733 if (GET_CODE (op
) == TRUNCATE
734 && GET_MODE (XEXP (op
, 0)) == mode
735 && GET_CODE (XEXP (op
, 0)) == MINUS
736 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
737 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
740 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
741 if (! POINTERS_EXTEND_UNSIGNED
742 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
744 || (GET_CODE (op
) == SUBREG
745 && GET_CODE (SUBREG_REG (op
)) == REG
746 && REG_POINTER (SUBREG_REG (op
))
747 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
748 return convert_memory_address (Pmode
, op
);
752 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
754 if (POINTERS_EXTEND_UNSIGNED
> 0
755 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
757 || (GET_CODE (op
) == SUBREG
758 && GET_CODE (SUBREG_REG (op
)) == REG
759 && REG_POINTER (SUBREG_REG (op
))
760 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
761 return convert_memory_address (Pmode
, op
);
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Subroutine of simplify_binary_operation, called via do_float_handler.
   Handles simplification of binary ops on floating point values.  */
struct simplify_binary_real_args
{
  rtx trueop0, trueop1;
  rtx result;
  enum rtx_code code;
  enum machine_mode mode;
};
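/* Note (added comment): this worker runs under do_float_handler so that a
   floating-point trap raised while folding host arithmetic is caught
   instead of terminating the compiler; the caller uses args->result only
   when do_float_handler reports success.  */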
785 simplify_binary_real (p
)
788 REAL_VALUE_TYPE f0
, f1
, value
;
789 struct simplify_binary_real_args
*args
=
790 (struct simplify_binary_real_args
*) p
;
792 REAL_VALUE_FROM_CONST_DOUBLE (f0
, args
->trueop0
);
793 REAL_VALUE_FROM_CONST_DOUBLE (f1
, args
->trueop1
);
794 f0
= real_value_truncate (args
->mode
, f0
);
795 f1
= real_value_truncate (args
->mode
, f1
);
797 #ifdef REAL_ARITHMETIC
798 #ifndef REAL_INFINITY
799 if (args
->code
== DIV
&& REAL_VALUES_EQUAL (f1
, dconst0
))
805 REAL_ARITHMETIC (value
, rtx_to_tree_code (args
->code
), f0
, f1
);
819 #ifndef REAL_INFINITY
826 value
= MIN (f0
, f1
);
829 value
= MAX (f0
, f1
);
836 value
= real_value_truncate (args
->mode
, value
);
837 args
->result
= CONST_DOUBLE_FROM_REAL_VALUE (value
, args
->mode
);
841 /* Another subroutine called via do_float_handler. This one tests
842 the floating point value given against 2. and -1. */
843 struct simplify_binary_is2orm1_args
851 simplify_binary_is2orm1 (p
)
855 struct simplify_binary_is2orm1_args
*args
=
856 (struct simplify_binary_is2orm1_args
*) p
;
858 REAL_VALUE_FROM_CONST_DOUBLE (d
, args
->value
);
859 args
->is_2
= REAL_VALUES_EQUAL (d
, dconst2
);
860 args
->is_m1
= REAL_VALUES_EQUAL (d
, dconstm1
);
863 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
864 and OP1. Return 0 if no simplification is possible.
866 Don't use this for relational operations such as EQ or LT.
867 Use simplify_relational_operation instead. */
869 simplify_binary_operation (code
, mode
, op0
, op1
)
871 enum machine_mode mode
;
874 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
876 unsigned int width
= GET_MODE_BITSIZE (mode
);
878 rtx trueop0
= avoid_constant_pool_reference (op0
);
879 rtx trueop1
= avoid_constant_pool_reference (op1
);
881 /* Relational operations don't work here. We must know the mode
882 of the operands in order to do the comparison correctly.
883 Assuming a full word can give incorrect results.
884 Consider comparing 128 with -128 in QImode. */
886 if (GET_RTX_CLASS (code
) == '<')
889 /* Make sure the constant is second. */
890 if (GET_RTX_CLASS (code
) == 'c'
891 && swap_commutative_operands_p (trueop0
, trueop1
))
893 tem
= op0
, op0
= op1
, op1
= tem
;
894 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
897 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
898 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
899 && GET_CODE (trueop0
) == CONST_DOUBLE
900 && GET_CODE (trueop1
) == CONST_DOUBLE
901 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
903 struct simplify_binary_real_args args
;
904 args
.trueop0
= trueop0
;
905 args
.trueop1
= trueop1
;
909 if (do_float_handler (simplify_binary_real
, (PTR
) &args
))
913 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
915 /* We can fold some multi-word operations. */
916 if (GET_MODE_CLASS (mode
) == MODE_INT
917 && width
== HOST_BITS_PER_WIDE_INT
* 2
918 && (GET_CODE (trueop0
) == CONST_DOUBLE
919 || GET_CODE (trueop0
) == CONST_INT
)
920 && (GET_CODE (trueop1
) == CONST_DOUBLE
921 || GET_CODE (trueop1
) == CONST_INT
))
923 unsigned HOST_WIDE_INT l1
, l2
, lv
;
924 HOST_WIDE_INT h1
, h2
, hv
;
926 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
927 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
929 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
931 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
932 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
934 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
939 /* A - B == A + (-B). */
940 neg_double (l2
, h2
, &lv
, &hv
);
943 /* .. fall through ... */
946 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
950 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
953 case DIV
: case MOD
: case UDIV
: case UMOD
:
954 /* We'd need to include tree.h to do this and it doesn't seem worth
959 lv
= l1
& l2
, hv
= h1
& h2
;
963 lv
= l1
| l2
, hv
= h1
| h2
;
967 lv
= l1
^ l2
, hv
= h1
^ h2
;
973 && ((unsigned HOST_WIDE_INT
) l1
974 < (unsigned HOST_WIDE_INT
) l2
)))
983 && ((unsigned HOST_WIDE_INT
) l1
984 > (unsigned HOST_WIDE_INT
) l2
)))
991 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
993 && ((unsigned HOST_WIDE_INT
) l1
994 < (unsigned HOST_WIDE_INT
) l2
)))
1001 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
1003 && ((unsigned HOST_WIDE_INT
) l1
1004 > (unsigned HOST_WIDE_INT
) l2
)))
1010 case LSHIFTRT
: case ASHIFTRT
:
1012 case ROTATE
: case ROTATERT
:
1013 #ifdef SHIFT_COUNT_TRUNCATED
1014 if (SHIFT_COUNT_TRUNCATED
)
1015 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
1018 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
1021 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
1022 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
1024 else if (code
== ASHIFT
)
1025 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
1026 else if (code
== ROTATE
)
1027 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1028 else /* code == ROTATERT */
1029 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1036 return immed_double_const (lv
, hv
, mode
);
1039 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
1040 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
1042 /* Even if we can't compute a constant result,
1043 there are some cases worth simplifying. */
1048 /* In IEEE floating point, x+0 is not the same as x. Similarly
1049 for the other optimizations below. */
1050 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
1051 && FLOAT_MODE_P (mode
) && ! flag_unsafe_math_optimizations
)
1054 if (trueop1
== CONST0_RTX (mode
))
1057 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
1058 if (GET_CODE (op0
) == NEG
)
1059 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1060 else if (GET_CODE (op1
) == NEG
)
1061 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1063 /* (~a) + 1 -> -a */
1064 if (INTEGRAL_MODE_P (mode
)
1065 && GET_CODE (op0
) == NOT
1066 && trueop1
== const1_rtx
)
1067 return gen_rtx_NEG (mode
, XEXP (op0
, 0));
1069 /* Handle both-operands-constant cases. We can only add
1070 CONST_INTs to constants since the sum of relocatable symbols
1071 can't be handled by most assemblers. Don't add CONST_INT
1072 to CONST_INT since overflow won't be computed properly if wider
1073 than HOST_BITS_PER_WIDE_INT. */
1075 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1076 && GET_CODE (op1
) == CONST_INT
)
1077 return plus_constant (op0
, INTVAL (op1
));
1078 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1079 && GET_CODE (op0
) == CONST_INT
)
1080 return plus_constant (op1
, INTVAL (op0
));
1082 /* See if this is something like X * C - X or vice versa or
1083 if the multiplication is written as a shift. If so, we can
1084 distribute and make a new multiply, shift, or maybe just
1085 have X (if C is 2 in the example above). But don't make
1086 real multiply if we didn't have one before. */
1088 if (! FLOAT_MODE_P (mode
))
1090 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1091 rtx lhs
= op0
, rhs
= op1
;
1094 if (GET_CODE (lhs
) == NEG
)
1095 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1096 else if (GET_CODE (lhs
) == MULT
1097 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1099 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1102 else if (GET_CODE (lhs
) == ASHIFT
1103 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1104 && INTVAL (XEXP (lhs
, 1)) >= 0
1105 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1107 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1108 lhs
= XEXP (lhs
, 0);
1111 if (GET_CODE (rhs
) == NEG
)
1112 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1113 else if (GET_CODE (rhs
) == MULT
1114 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1116 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1119 else if (GET_CODE (rhs
) == ASHIFT
1120 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1121 && INTVAL (XEXP (rhs
, 1)) >= 0
1122 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1124 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1125 rhs
= XEXP (rhs
, 0);
1128 if (rtx_equal_p (lhs
, rhs
))
1130 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1131 GEN_INT (coeff0
+ coeff1
));
1132 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1136 /* If one of the operands is a PLUS or a MINUS, see if we can
1137 simplify this by the associative law.
1138 Don't use the associative law for floating point.
1139 The inaccuracy makes it nonassociative,
1140 and subtle programs can break if operations are associated. */
1142 if (INTEGRAL_MODE_P (mode
)
1143 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1144 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1145 || (GET_CODE (op0
) == CONST
1146 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1147 || (GET_CODE (op1
) == CONST
1148 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1149 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1155 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1156 using cc0, in which case we want to leave it as a COMPARE
1157 so we can distinguish it from a register-register-copy.
1159 In IEEE floating point, x-0 is not the same as x. */
1161 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1162 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1163 && trueop1
== CONST0_RTX (mode
))
1167 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1168 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1169 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1170 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1172 rtx xop00
= XEXP (op0
, 0);
1173 rtx xop10
= XEXP (op1
, 0);
1176 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1178 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1179 && GET_MODE (xop00
) == GET_MODE (xop10
)
1180 && REGNO (xop00
) == REGNO (xop10
)
1181 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1182 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1189 /* None of these optimizations can be done for IEEE
1191 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
1192 && FLOAT_MODE_P (mode
) && ! flag_unsafe_math_optimizations
)
1195 /* We can't assume x-x is 0 even with non-IEEE floating point,
1196 but since it is zero except in very strange circumstances, we
1197 will treat it as zero with -funsafe-math-optimizations. */
1198 if (rtx_equal_p (trueop0
, trueop1
)
1199 && ! side_effects_p (op0
)
1200 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1201 return CONST0_RTX (mode
);
1203 /* Change subtraction from zero into negation. */
1204 if (trueop0
== CONST0_RTX (mode
))
1205 return gen_rtx_NEG (mode
, op1
);
1207 /* (-1 - a) is ~a. */
1208 if (trueop0
== constm1_rtx
)
1209 return gen_rtx_NOT (mode
, op1
);
1211 /* Subtracting 0 has no effect. */
1212 if (trueop1
== CONST0_RTX (mode
))
1215 /* See if this is something like X * C - X or vice versa or
1216 if the multiplication is written as a shift. If so, we can
1217 distribute and make a new multiply, shift, or maybe just
1218 have X (if C is 2 in the example above). But don't make
1219 real multiply if we didn't have one before. */
1221 if (! FLOAT_MODE_P (mode
))
1223 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1224 rtx lhs
= op0
, rhs
= op1
;
1227 if (GET_CODE (lhs
) == NEG
)
1228 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1229 else if (GET_CODE (lhs
) == MULT
1230 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1232 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1235 else if (GET_CODE (lhs
) == ASHIFT
1236 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1237 && INTVAL (XEXP (lhs
, 1)) >= 0
1238 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1240 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1241 lhs
= XEXP (lhs
, 0);
1244 if (GET_CODE (rhs
) == NEG
)
1245 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1246 else if (GET_CODE (rhs
) == MULT
1247 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1249 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1252 else if (GET_CODE (rhs
) == ASHIFT
1253 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1254 && INTVAL (XEXP (rhs
, 1)) >= 0
1255 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1257 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1258 rhs
= XEXP (rhs
, 0);
1261 if (rtx_equal_p (lhs
, rhs
))
1263 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1264 GEN_INT (coeff0
- coeff1
));
1265 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1269 /* (a - (-b)) -> (a + b). */
1270 if (GET_CODE (op1
) == NEG
)
1271 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1273 /* If one of the operands is a PLUS or a MINUS, see if we can
1274 simplify this by the associative law.
1275 Don't use the associative law for floating point.
1276 The inaccuracy makes it nonassociative,
1277 and subtle programs can break if operations are associated. */
1279 if (INTEGRAL_MODE_P (mode
)
1280 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1281 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1282 || (GET_CODE (op0
) == CONST
1283 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1284 || (GET_CODE (op1
) == CONST
1285 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1286 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1289 /* Don't let a relocatable value get a negative coeff. */
1290 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1291 return simplify_gen_binary (PLUS
, mode
,
1293 neg_const_int (mode
, op1
));
1295 /* (x - (x & y)) -> (x & ~y) */
1296 if (GET_CODE (op1
) == AND
)
1298 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1299 return simplify_gen_binary (AND
, mode
, op0
,
1300 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
1301 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1302 return simplify_gen_binary (AND
, mode
, op0
,
1303 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
1308 if (trueop1
== constm1_rtx
)
1310 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1312 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1315 /* In IEEE floating point, x*0 is not always 0. */
1316 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1317 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1318 && trueop1
== CONST0_RTX (mode
)
1319 && ! side_effects_p (op0
))
1322 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1323 However, ANSI says we can drop signals,
1324 so we can do this anyway. */
1325 if (trueop1
== CONST1_RTX (mode
))
1328 /* Convert multiply by constant power of two into shift unless
1329 we are still generating RTL. This test is a kludge. */
1330 if (GET_CODE (trueop1
) == CONST_INT
1331 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1332 /* If the mode is larger than the host word size, and the
1333 uppermost bit is set, then this isn't a power of two due
1334 to implicit sign extension. */
1335 && (width
<= HOST_BITS_PER_WIDE_INT
1336 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1337 && ! rtx_equal_function_value_matters
)
1338 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1340 if (GET_CODE (trueop1
) == CONST_DOUBLE
1341 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
)
1343 struct simplify_binary_is2orm1_args args
;
1345 args
.value
= trueop1
;
1346 if (! do_float_handler (simplify_binary_is2orm1
, (PTR
) &args
))
1349 /* x*2 is x+x and x*(-1) is -x */
1350 if (args
.is_2
&& GET_MODE (op0
) == mode
)
1351 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1353 else if (args
.is_m1
&& GET_MODE (op0
) == mode
)
1354 return gen_rtx_NEG (mode
, op0
);
1359 if (trueop1
== const0_rtx
)
1361 if (GET_CODE (trueop1
) == CONST_INT
1362 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1363 == GET_MODE_MASK (mode
)))
1365 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1367 /* A | (~A) -> -1 */
1368 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1369 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1370 && ! side_effects_p (op0
)
1371 && GET_MODE_CLASS (mode
) != MODE_CC
)
1376 if (trueop1
== const0_rtx
)
1378 if (GET_CODE (trueop1
) == CONST_INT
1379 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1380 == GET_MODE_MASK (mode
)))
1381 return gen_rtx_NOT (mode
, op0
);
1382 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1383 && GET_MODE_CLASS (mode
) != MODE_CC
)
1388 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1390 if (GET_CODE (trueop1
) == CONST_INT
1391 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1392 == GET_MODE_MASK (mode
)))
1394 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1395 && GET_MODE_CLASS (mode
) != MODE_CC
)
1398 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1399 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1400 && ! side_effects_p (op0
)
1401 && GET_MODE_CLASS (mode
) != MODE_CC
)
1406 /* Convert divide by power of two into shift (divide by 1 handled
1408 if (GET_CODE (trueop1
) == CONST_INT
1409 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1410 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1412 /* ... fall through ... */
1415 if (trueop1
== CONST1_RTX (mode
))
/* On some platforms DIV uses narrower mode than its operands.  */
1419 rtx x
= gen_lowpart_common (mode
, op0
);
1422 else if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
1423 return gen_lowpart_SUBREG (mode
, op0
);
1428 /* In IEEE floating point, 0/x is not always 0. */
1429 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1430 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1431 && trueop0
== CONST0_RTX (mode
)
1432 && ! side_effects_p (op1
))
1435 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1436 /* Change division by a constant into multiplication. Only do
1437 this with -funsafe-math-optimizations. */
1438 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1439 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1440 && trueop1
!= CONST0_RTX (mode
)
1441 && flag_unsafe_math_optimizations
)
1444 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1446 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1448 #if defined (REAL_ARITHMETIC)
1449 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1450 return gen_rtx_MULT (mode
, op0
,
1451 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1454 gen_rtx_MULT (mode
, op0
,
1455 CONST_DOUBLE_FROM_REAL_VALUE (1./d
, mode
));
1463 /* Handle modulus by power of two (mod with 1 handled below). */
1464 if (GET_CODE (trueop1
) == CONST_INT
1465 && exact_log2 (INTVAL (trueop1
)) > 0)
1466 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1468 /* ... fall through ... */
1471 if ((trueop0
== const0_rtx
|| trueop1
== const1_rtx
)
1472 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1478 /* Rotating ~0 always results in ~0. */
1479 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1480 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1481 && ! side_effects_p (op1
))
1484 /* ... fall through ... */
1489 if (trueop1
== const0_rtx
)
1491 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
1496 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1
) == CONST_INT
1497 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1498 && ! side_effects_p (op0
))
1500 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1505 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1
) == CONST_INT
1506 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1507 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1508 && ! side_effects_p (op0
))
1510 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1515 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1517 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1522 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1524 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1532 /* ??? There are simplifications that can be done. */
1542 /* Get the integer argument values in two forms:
1543 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1545 arg0
= INTVAL (trueop0
);
1546 arg1
= INTVAL (trueop1
);
1548 if (width
< HOST_BITS_PER_WIDE_INT
)
1550 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1551 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1554 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1555 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1558 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1559 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1567 /* Compute the value of the arithmetic. */
1572 val
= arg0s
+ arg1s
;
1576 val
= arg0s
- arg1s
;
1580 val
= arg0s
* arg1s
;
1585 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1588 val
= arg0s
/ arg1s
;
1593 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1596 val
= arg0s
% arg1s
;
1601 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1604 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1609 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1612 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1628 /* If shift count is undefined, don't fold it; let the machine do
1629 what it wants. But truncate it if the machine will do that. */
1633 #ifdef SHIFT_COUNT_TRUNCATED
1634 if (SHIFT_COUNT_TRUNCATED
)
1638 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1645 #ifdef SHIFT_COUNT_TRUNCATED
1646 if (SHIFT_COUNT_TRUNCATED
)
1650 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1657 #ifdef SHIFT_COUNT_TRUNCATED
1658 if (SHIFT_COUNT_TRUNCATED
)
1662 val
= arg0s
>> arg1
;
/* Bootstrap compiler may not have sign extended the right shift.
   Manually extend the sign to ensure bootstrap cc matches gcc.  */
1666 if (arg0s
< 0 && arg1
> 0)
1667 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1676 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1677 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1685 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1686 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1690 /* Do nothing here. */
1694 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1698 val
= ((unsigned HOST_WIDE_INT
) arg0
1699 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1703 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1707 val
= ((unsigned HOST_WIDE_INT
) arg0
1708 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1715 val
= trunc_int_for_mode (val
, mode
);
1717 return GEN_INT (val
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
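/* Note (added comment): sorting with this comparator puts operands of higher
   commutative_operand_precedence first, so constants sink to the end of the
   ops[] array; the code below relies on any CONST_INT being last after the
   qsort.  */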
1749 simplify_plus_minus (code
, mode
, op0
, op1
, force
)
1751 enum machine_mode mode
;
1755 struct simplify_plus_minus_op_data ops
[8];
1757 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
;
1758 int first
, negate
, changed
;
1761 memset ((char *) ops
, 0, sizeof ops
);
1763 /* Set up the two operands and then expand them until nothing has been
1764 changed. If we run out of room in our array, give up; this should
1765 almost never happen. */
1770 ops
[1].neg
= (code
== MINUS
);
1776 for (i
= 0; i
< n_ops
; i
++)
1778 rtx this_op
= ops
[i
].op
;
1779 int this_neg
= ops
[i
].neg
;
1780 enum rtx_code this_code
= GET_CODE (this_op
);
1793 ops
[n_ops
].op
= XEXP (this_op
, 1);
1794 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
1797 ops
[i
].op
= XEXP (this_op
, 0);
1803 ops
[i
].op
= XEXP (this_op
, 0);
1804 ops
[i
].neg
= ! this_neg
;
1810 && GET_CODE (XEXP (this_op
, 0)) == PLUS
1811 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
1812 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
1814 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
1815 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
1816 ops
[n_ops
].neg
= this_neg
;
1824 /* ~a -> (-a - 1) */
1827 ops
[n_ops
].op
= constm1_rtx
;
1828 ops
[n_ops
++].neg
= this_neg
;
1829 ops
[i
].op
= XEXP (this_op
, 0);
1830 ops
[i
].neg
= !this_neg
;
1838 ops
[i
].op
= neg_const_int (mode
, this_op
);
1851 /* If we only have two operands, we can't do anything. */
1852 if (n_ops
<= 2 && !force
)
1855 /* Count the number of CONSTs we didn't split above. */
1856 for (i
= 0; i
< n_ops
; i
++)
1857 if (GET_CODE (ops
[i
].op
) == CONST
)
1860 /* Now simplify each pair of operands until nothing changes. The first
1861 time through just simplify constants against each other. */
1868 for (i
= 0; i
< n_ops
- 1; i
++)
1869 for (j
= i
+ 1; j
< n_ops
; j
++)
1871 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
1872 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
1874 if (lhs
!= 0 && rhs
!= 0
1875 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
1877 enum rtx_code ncode
= PLUS
;
1883 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
1885 else if (swap_commutative_operands_p (lhs
, rhs
))
1886 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
1888 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
1890 /* Reject "simplifications" that just wrap the two
1891 arguments in a CONST. Failure to do so can result
1892 in infinite recursion with simplify_binary_operation
1893 when it calls us to simplify CONST operations. */
1895 && ! (GET_CODE (tem
) == CONST
1896 && GET_CODE (XEXP (tem
, 0)) == ncode
1897 && XEXP (XEXP (tem
, 0), 0) == lhs
1898 && XEXP (XEXP (tem
, 0), 1) == rhs
)
1899 /* Don't allow -x + -1 -> ~x simplifications in the
1900 first pass. This allows us the chance to combine
1901 the -1 with other constants. */
1903 && GET_CODE (tem
) == NOT
1904 && XEXP (tem
, 0) == rhs
))
1907 if (GET_CODE (tem
) == NEG
)
1908 tem
= XEXP (tem
, 0), lneg
= !lneg
;
1909 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
1910 tem
= neg_const_int (mode
, tem
), lneg
= 0;
1914 ops
[j
].op
= NULL_RTX
;
1924 /* Pack all the operands to the lower-numbered entries. */
1925 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1930 /* Sort the operations based on swap_commutative_operands_p. */
1931 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
1933 /* We suppressed creation of trivial CONST expressions in the
1934 combination loop to avoid recursion. Create one manually now.
1935 The combination loop should have ensured that there is exactly
1936 one CONST_INT, and the sort will have ensured that it is last
1937 in the array and that any other constant will be next-to-last. */
1940 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
1941 && CONSTANT_P (ops
[n_ops
- 2].op
))
1943 rtx value
= ops
[n_ops
- 1].op
;
1944 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
1945 value
= neg_const_int (mode
, value
);
1946 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
1950 /* Count the number of CONSTs that we generated. */
1952 for (i
= 0; i
< n_ops
; i
++)
1953 if (GET_CODE (ops
[i
].op
) == CONST
)
1956 /* Give up if we didn't reduce the number of operands we had. Make
1957 sure we count a CONST as two operands. If we have the same
1958 number of operands, but have made more CONSTs than before, this
1959 is also an improvement, so accept it. */
1961 && (n_ops
+ n_consts
> input_ops
1962 || (n_ops
+ n_consts
== input_ops
&& n_consts
<= input_consts
)))
1965 /* Put a non-negated operand first. If there aren't any, make all
1966 operands positive and negate the whole thing later. */
1969 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
1973 for (i
= 0; i
< n_ops
; i
++)
1985 /* Now make the result by performing the requested operations. */
1987 for (i
= 1; i
< n_ops
; i
++)
1988 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
1989 mode
, result
, ops
[i
].op
);
1991 return negate
? gen_rtx_NEG (mode
, result
) : result
;
struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
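/* Note (added comment): args->unordered stays set when either operand is a
   NaN, so the caller can treat the comparison as unordered; for ordinary
   values the three output flags give =, < and > in a single pass.  */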
2023 /* Like simplify_binary_operation except used for relational operators.
2024 MODE is the mode of the operands, not that of the result. If MODE
2025 is VOIDmode, both operands must also be VOIDmode and we compare the
2026 operands in "infinite precision".
2028 If no simplification is possible, this function returns zero. Otherwise,
2029 it returns either const_true_rtx or const0_rtx. */
2032 simplify_relational_operation (code
, mode
, op0
, op1
)
2034 enum machine_mode mode
;
2037 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
2042 if (mode
== VOIDmode
2043 && (GET_MODE (op0
) != VOIDmode
2044 || GET_MODE (op1
) != VOIDmode
))
2047 /* If op0 is a compare, extract the comparison arguments from it. */
2048 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
2049 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
2051 trueop0
= avoid_constant_pool_reference (op0
);
2052 trueop1
= avoid_constant_pool_reference (op1
);
2054 /* We can't simplify MODE_CC values since we don't know what the
2055 actual comparison is. */
2056 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
2063 /* Make sure the constant is second. */
2064 if (swap_commutative_operands_p (trueop0
, trueop1
))
2066 tem
= op0
, op0
= op1
, op1
= tem
;
2067 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
2068 code
= swap_condition (code
);
2071 /* For integer comparisons of A and B maybe we can simplify A - B and can
2072 then simplify a comparison of that with zero. If A and B are both either
2073 a register or a CONST_INT, this can't help; testing for these cases will
2074 prevent infinite recursion here and speed things up.
2076 If CODE is an unsigned comparison, then we can never do this optimization,
2077 because it gives an incorrect result if the subtraction wraps around zero.
2078 ANSI C defines unsigned operations such that they never overflow, and
2079 thus such cases can not be ignored. */
2081 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
2082 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (trueop0
) == CONST_INT
)
2083 && (GET_CODE (op1
) == REG
|| GET_CODE (trueop1
) == CONST_INT
))
2084 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
2085 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
2086 return simplify_relational_operation (signed_condition (code
),
2087 mode
, tem
, const0_rtx
);
2089 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
2090 return const_true_rtx
;
2092 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
2095 /* For non-IEEE floating-point, if the two operands are equal, we know the
2097 if (rtx_equal_p (trueop0
, trueop1
)
2098 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
2099 || ! FLOAT_MODE_P (GET_MODE (trueop0
))
2100 || flag_unsafe_math_optimizations
))
2101 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
2103 /* If the operands are floating-point constants, see if we can fold
2105 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2106 else if (GET_CODE (trueop0
) == CONST_DOUBLE
2107 && GET_CODE (trueop1
) == CONST_DOUBLE
2108 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
2110 struct cfc_args args
;
2112 /* Setup input for check_fold_consts() */
2117 if (!do_float_handler (check_fold_consts
, (PTR
) &args
))
2130 return const_true_rtx
;
2143 /* Receive output from check_fold_consts() */
2145 op0lt
= op0ltu
= args
.op0lt
;
2146 op1lt
= op1ltu
= args
.op1lt
;
2148 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2150 /* Otherwise, see if the operands are both integers. */
2151 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
2152 && (GET_CODE (trueop0
) == CONST_DOUBLE
2153 || GET_CODE (trueop0
) == CONST_INT
)
2154 && (GET_CODE (trueop1
) == CONST_DOUBLE
2155 || GET_CODE (trueop1
) == CONST_INT
))
2157 int width
= GET_MODE_BITSIZE (mode
);
2158 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
2159 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
2161 /* Get the two words comprising each integer constant. */
2162 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
2164 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
2165 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
2169 l0u
= l0s
= INTVAL (trueop0
);
2170 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
2173 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
2175 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
2176 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
2180 l1u
= l1s
= INTVAL (trueop1
);
2181 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
2184 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2185 we have to sign or zero-extend the values. */
2186 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
2188 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2189 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2191 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2192 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2194 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2195 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2197 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
2198 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
2200 equal
= (h0u
== h1u
&& l0u
== l1u
);
2201 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
2202 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
2203 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
2204 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
2207 /* Otherwise, there are some code-specific tests we can make. */
2213 /* References to the frame plus a constant or labels cannot
2214 be zero, but a SYMBOL_REF can due to #pragma weak. */
2215 if (((NONZERO_BASE_PLUS_P (op0
) && trueop1
== const0_rtx
)
2216 || GET_CODE (trueop0
) == LABEL_REF
)
2217 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2218 /* On some machines, the ap reg can be 0 sometimes. */
2219 && op0
!= arg_pointer_rtx
2226 if (((NONZERO_BASE_PLUS_P (op0
) && trueop1
== const0_rtx
)
2227 || GET_CODE (trueop0
) == LABEL_REF
)
2228 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2229 && op0
!= arg_pointer_rtx
2232 return const_true_rtx
;
2236 /* Unsigned values are never negative. */
2237 if (trueop1
== const0_rtx
)
2238 return const_true_rtx
;
2242 if (trueop1
== const0_rtx
)
2247 /* Unsigned values are never greater than the largest
2249 if (GET_CODE (trueop1
) == CONST_INT
2250 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2251 && INTEGRAL_MODE_P (mode
))
2252 return const_true_rtx
;
2256 if (GET_CODE (trueop1
) == CONST_INT
2257 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2258 && INTEGRAL_MODE_P (mode
))
2269 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2275 return equal
? const_true_rtx
: const0_rtx
;
2278 return ! equal
? const_true_rtx
: const0_rtx
;
2281 return op0lt
? const_true_rtx
: const0_rtx
;
2284 return op1lt
? const_true_rtx
: const0_rtx
;
2286 return op0ltu
? const_true_rtx
: const0_rtx
;
2288 return op1ltu
? const_true_rtx
: const0_rtx
;
2291 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
2294 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
2296 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
2298 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
2300 return const_true_rtx
;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
2313 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
2315 enum machine_mode mode
, op0_mode
;
2318 unsigned int width
= GET_MODE_BITSIZE (mode
);
2320 /* VOIDmode means "infinite" precision. */
2322 width
= HOST_BITS_PER_WIDE_INT
;
2328 if (GET_CODE (op0
) == CONST_INT
2329 && GET_CODE (op1
) == CONST_INT
2330 && GET_CODE (op2
) == CONST_INT
2331 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
2332 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
2334 /* Extracting a bit-field from a constant */
2335 HOST_WIDE_INT val
= INTVAL (op0
);
2337 if (BITS_BIG_ENDIAN
)
2338 val
>>= (GET_MODE_BITSIZE (op0_mode
)
2339 - INTVAL (op2
) - INTVAL (op1
));
2341 val
>>= INTVAL (op2
);
2343 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
2345 /* First zero-extend. */
2346 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
2347 /* If desired, propagate sign bit. */
2348 if (code
== SIGN_EXTRACT
2349 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
2350 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
2353 /* Clear the bits that don't belong in our mode,
2354 unless they and our sign bit are all one.
2355 So we get either a reasonable negative value or a reasonable
2356 unsigned value for this mode. */
2357 if (width
< HOST_BITS_PER_WIDE_INT
2358 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
2359 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
2360 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2362 return GEN_INT (val
);
2367 if (GET_CODE (op0
) == CONST_INT
)
2368 return op0
!= const0_rtx
? op1
: op2
;
2370 /* Convert a == b ? b : a to "a". */
2371 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
2372 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
2373 && rtx_equal_p (XEXP (op0
, 0), op1
)
2374 && rtx_equal_p (XEXP (op0
, 1), op2
))
2376 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
2377 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
2378 && rtx_equal_p (XEXP (op0
, 1), op1
)
2379 && rtx_equal_p (XEXP (op0
, 0), op2
))
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
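/* An illustrative instance of the "happy constants" transformation above
   (assuming STORE_FLAG_VALUE == 1 and made-up operands):

     (if_then_else:SI (ne:SI (reg:SI 60) (const_int 0))
		      (const_int 1) (const_int 0))

   becomes (ne:SI (reg:SI 60) (const_int 0)); with the two constants
   swapped, the reversed comparison code (eq) is used instead.  */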
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* A similar comment to the one above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = !part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* Fall through.  */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
	  else
	    {
	      val >>= offset;
	      if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
		val = trunc_int_for_mode (val, outermode);
	      return GEN_INT (val);
	    }

	default:
	  break;
	}
    }
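
  /* An illustrative instance of the constant case above (little-endian
     target assumed): (subreg:QI (const_int 0x12345678) 1) takes
     offset = 8, so val = 0x12345678 >> 8 = 0x123456, which
     trunc_int_for_mode reduces to (const_int 0x56).  */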

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0; on big endian machines this value
	 should really be negative.  For a moment, undo this exception.  */
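      /* For instance (purely illustrative, assuming UNITS_PER_WORD == 8 and
	 a big-endian target), a paradoxical (subreg:DI (reg:SI ...) 0) read
	 from memory would really start 4 bytes before the SImode value, so
	 difference = 4 - 8 = -4 and the adjustment below adds -4 to
	 FINAL_OFFSET.  */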
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
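  /* Illustratively (the exact register numbers are target specific), on a
     little-endian target with 32-bit hard registers, (subreg:SI (reg:DI 0) 4)
     can simply become (reg:SI 1), provided HARD_REGNO_MODE_OK allows SImode
     in that register and it is not one of the pointer registers excluded
     below.  */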
  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
	    && (TEST_HARD_REG_BIT
		(reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
		 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG (outermode, final_regno);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
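
  /* For example (illustrative address only), on a little-endian target
     (subreg:SI (mem:DI (reg:SI 100)) 4) is rewritten by adjust_address_nv
     into (mem:SI (plus:SI (reg:SI 100) (const_int 4))).  */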

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
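/* A minimal illustration of the CONCAT case above (made-up operands): with
   SCmode made up of two 4-byte SFmode halves,
   (subreg:SF (concat:SC (reg:SF 101) (reg:SF 102)) 0) simplifies to
   (reg:SF 101), while a byte offset of 4 selects (reg:SF 102).  */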
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
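/* Typical (hypothetical) use from a pass: instead of building the SUBREG
   by hand, call

     x = simplify_gen_subreg (SImode, operand, DImode, 0);

   which returns the folded form (a REG, a CONST_INT, an adjusted MEM, ...)
   when simplify_subreg succeeds, otherwise falls back to gen_rtx_SUBREG, or
   returns NULL_RTX when no valid SUBREG can be built (e.g. for a QUEUED
   operand).  */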
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	{
	  rtx tem;

	  tem = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tem;
	  return simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	}
      /* Fall through.  */
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),