1 /* Common subexpression elimination for GNU compiler.
2 Copyright (C) 1987, 88, 89, 92-7, 1998, 1999 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
23 /* stdio.h must precede rtl.h for FFS. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with the
48 simplification routines in simplify-rtx.c. Until then, do not
49 change these macros without also changing the copy in simplify-rtx.c. */
/* Nonzero if X is one of the fixed frame-related base registers, or has
   the form (PLUS fixed-base CONST_INT).  Virtual registers are accepted
   because the simplify_*_operation routines run before virtual register
   instantiation (see the comment above).

   Bug fix: inside the PLUS arm, the arg-pointer test must examine
   XEXP (X, 0) like every other test in that arm.  X itself is known to
   be a PLUS at that point, so the old "(X) == arg_pointer_rtx" could
   never match and (PLUS arg_pointer const) was wrongly rejected.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
/* Like FIXED_BASE_PLUS_P, but also allows reference to the stack
   pointer and the bases derived from it.  A bare arg_pointer_rtx is
   deliberately NOT accepted here: on at least one machine (the i960)
   the arg pointer is zero when it is unused.

   Bug fix: as in FIXED_BASE_PLUS_P, the arg-pointer test inside the
   first PLUS arm must examine XEXP (X, 0); X is a PLUS there, so
   comparing X itself against arg_pointer_rtx was a dead test.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
92 static rtx simplify_plus_minus
PROTO((enum rtx_code
, enum machine_mode
,
94 static void check_fold_consts
PROTO((PTR
));
96 /* Make a binary operation by properly ordering the operands and
97 seeing if the expression folds. */
100 simplify_gen_binary (code
, mode
, op0
, op1
)
102 enum machine_mode mode
;
107 /* Put complex operands first and constants second if commutative. */
108 if (GET_RTX_CLASS (code
) == 'c'
109 && ((CONSTANT_P (op0
) && GET_CODE (op1
) != CONST_INT
)
110 || (GET_RTX_CLASS (GET_CODE (op0
)) == 'o'
111 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')
112 || (GET_CODE (op0
) == SUBREG
113 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0
))) == 'o'
114 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')))
115 tem
= op0
, op0
= op1
, op1
= tem
;
117 /* If this simplifies, do it. */
118 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
123 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
124 just form the operation. */
126 if (code
== PLUS
&& GET_CODE (op1
) == CONST_INT
127 && GET_MODE (op0
) != VOIDmode
)
128 return plus_constant (op0
, INTVAL (op1
));
129 else if (code
== MINUS
&& GET_CODE (op1
) == CONST_INT
130 && GET_MODE (op0
) != VOIDmode
)
131 return plus_constant (op0
, - INTVAL (op1
));
133 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
136 /* Try to simplify a unary operation CODE whose output mode is to be
137 MODE with input operand OP whose mode was originally OP_MODE.
138 Return zero if no simplification can be made. */
141 simplify_unary_operation (code
, mode
, op
, op_mode
)
143 enum machine_mode mode
;
145 enum machine_mode op_mode
;
147 register int width
= GET_MODE_BITSIZE (mode
);
149 /* The order of these tests is critical so that, for example, we don't
150 check the wrong mode (input vs. output) for a conversion operation,
151 such as FIX. At some point, this should be simplified. */
153 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
155 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
156 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
158 HOST_WIDE_INT hv
, lv
;
161 if (GET_CODE (op
) == CONST_INT
)
162 lv
= INTVAL (op
), hv
= INTVAL (op
) < 0 ? -1 : 0;
164 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
166 #ifdef REAL_ARITHMETIC
167 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
172 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
173 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
174 d
+= (double) (unsigned HOST_WIDE_INT
) (~ lv
);
180 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
181 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
182 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
184 #endif /* REAL_ARITHMETIC */
185 d
= real_value_truncate (mode
, d
);
186 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
188 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
189 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
191 HOST_WIDE_INT hv
, lv
;
194 if (GET_CODE (op
) == CONST_INT
)
195 lv
= INTVAL (op
), hv
= INTVAL (op
) < 0 ? -1 : 0;
197 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
199 if (op_mode
== VOIDmode
)
201 /* We don't know how to interpret negative-looking numbers in
202 this case, so don't try to fold those. */
206 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
209 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
211 #ifdef REAL_ARITHMETIC
212 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
215 d
= (double) (unsigned HOST_WIDE_INT
) hv
;
216 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
217 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
218 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
219 #endif /* REAL_ARITHMETIC */
220 d
= real_value_truncate (mode
, d
);
221 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
225 if (GET_CODE (op
) == CONST_INT
226 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
228 register HOST_WIDE_INT arg0
= INTVAL (op
);
229 register HOST_WIDE_INT val
;
242 val
= (arg0
>= 0 ? arg0
: - arg0
);
246 /* Don't use ffs here. Instead, get low order bit and then its
247 number. If arg0 is zero, this will return 0, as desired. */
248 arg0
&= GET_MODE_MASK (mode
);
249 val
= exact_log2 (arg0
& (- arg0
)) + 1;
257 if (op_mode
== VOIDmode
)
259 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
261 /* If we were really extending the mode,
262 we would have to distinguish between zero-extension
263 and sign-extension. */
264 if (width
!= GET_MODE_BITSIZE (op_mode
))
268 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
269 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
275 if (op_mode
== VOIDmode
)
277 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
279 /* If we were really extending the mode,
280 we would have to distinguish between zero-extension
281 and sign-extension. */
282 if (width
!= GET_MODE_BITSIZE (op_mode
))
286 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
289 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
291 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
292 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
305 val
= trunc_int_for_mode (val
, mode
);
307 return GEN_INT (val
);
310 /* We can do some operations on integer CONST_DOUBLEs. Also allow
311 for a DImode operation on a CONST_INT. */
312 else if (GET_MODE (op
) == VOIDmode
&& width
<= HOST_BITS_PER_INT
* 2
313 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
315 HOST_WIDE_INT l1
, h1
, lv
, hv
;
317 if (GET_CODE (op
) == CONST_DOUBLE
)
318 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
320 l1
= INTVAL (op
), h1
= l1
< 0 ? -1 : 0;
330 neg_double (l1
, h1
, &lv
, &hv
);
335 neg_double (l1
, h1
, &lv
, &hv
);
343 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
345 lv
= exact_log2 (l1
& (-l1
)) + 1;
349 /* This is just a change-of-mode, so do nothing. */
354 if (op_mode
== VOIDmode
355 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
359 lv
= l1
& GET_MODE_MASK (op_mode
);
363 if (op_mode
== VOIDmode
364 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
368 lv
= l1
& GET_MODE_MASK (op_mode
);
369 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
370 && (lv
& ((HOST_WIDE_INT
) 1
371 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
372 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
374 hv
= (lv
< 0) ? ~ (HOST_WIDE_INT
) 0 : 0;
385 return immed_double_const (lv
, hv
, mode
);
388 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
389 else if (GET_CODE (op
) == CONST_DOUBLE
390 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
396 if (setjmp (handler
))
397 /* There used to be a warning here, but that is inadvisable.
398 People may want to cause traps, and the natural way
399 to do it should not get a warning. */
402 set_float_handler (handler
);
404 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
409 d
= REAL_VALUE_NEGATE (d
);
413 if (REAL_VALUE_NEGATIVE (d
))
414 d
= REAL_VALUE_NEGATE (d
);
418 d
= real_value_truncate (mode
, d
);
422 /* All this does is change the mode. */
426 d
= REAL_VALUE_RNDZINT (d
);
430 d
= REAL_VALUE_UNSIGNED_RNDZINT (d
);
440 x
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
441 set_float_handler (NULL_PTR
);
445 else if (GET_CODE (op
) == CONST_DOUBLE
446 && GET_MODE_CLASS (GET_MODE (op
)) == MODE_FLOAT
447 && GET_MODE_CLASS (mode
) == MODE_INT
448 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
454 if (setjmp (handler
))
457 set_float_handler (handler
);
459 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
464 val
= REAL_VALUE_FIX (d
);
468 val
= REAL_VALUE_UNSIGNED_FIX (d
);
475 set_float_handler (NULL_PTR
);
477 val
= trunc_int_for_mode (val
, mode
);
479 return GEN_INT (val
);
482 /* This was formerly used only for non-IEEE float.
483 eggert@twinsun.com says it is safe for IEEE also. */
486 /* There are some simplifications we can do even if the operands
492 /* (not (not X)) == X, similarly for NEG. */
493 if (GET_CODE (op
) == code
)
498 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
499 becomes just the MINUS if its mode is MODE. This allows
500 folding switch statements on machines using casesi (such as
502 if (GET_CODE (op
) == TRUNCATE
503 && GET_MODE (XEXP (op
, 0)) == mode
504 && GET_CODE (XEXP (op
, 0)) == MINUS
505 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
506 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
509 #ifdef POINTERS_EXTEND_UNSIGNED
510 if (! POINTERS_EXTEND_UNSIGNED
511 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
513 return convert_memory_address (Pmode
, op
);
517 #ifdef POINTERS_EXTEND_UNSIGNED
519 if (POINTERS_EXTEND_UNSIGNED
520 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
522 return convert_memory_address (Pmode
, op
);
534 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
535 and OP1. Return 0 if no simplification is possible.
537 Don't use this for relational operations such as EQ or LT.
538 Use simplify_relational_operation instead. */
541 simplify_binary_operation (code
, mode
, op0
, op1
)
543 enum machine_mode mode
;
546 register HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
548 int width
= GET_MODE_BITSIZE (mode
);
551 /* Relational operations don't work here. We must know the mode
552 of the operands in order to do the comparison correctly.
553 Assuming a full word can give incorrect results.
554 Consider comparing 128 with -128 in QImode. */
556 if (GET_RTX_CLASS (code
) == '<')
559 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
560 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
561 && GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
562 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
564 REAL_VALUE_TYPE f0
, f1
, value
;
567 if (setjmp (handler
))
570 set_float_handler (handler
);
572 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
573 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
574 f0
= real_value_truncate (mode
, f0
);
575 f1
= real_value_truncate (mode
, f1
);
577 #ifdef REAL_ARITHMETIC
578 #ifndef REAL_INFINITY
579 if (code
== DIV
&& REAL_VALUES_EQUAL (f1
, dconst0
))
582 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
596 #ifndef REAL_INFINITY
603 value
= MIN (f0
, f1
);
606 value
= MAX (f0
, f1
);
613 value
= real_value_truncate (mode
, value
);
614 set_float_handler (NULL_PTR
);
615 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
617 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
619 /* We can fold some multi-word operations. */
620 if (GET_MODE_CLASS (mode
) == MODE_INT
621 && width
== HOST_BITS_PER_WIDE_INT
* 2
622 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
623 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
625 HOST_WIDE_INT l1
, l2
, h1
, h2
, lv
, hv
;
627 if (GET_CODE (op0
) == CONST_DOUBLE
)
628 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
630 l1
= INTVAL (op0
), h1
= l1
< 0 ? -1 : 0;
632 if (GET_CODE (op1
) == CONST_DOUBLE
)
633 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
635 l2
= INTVAL (op1
), h2
= l2
< 0 ? -1 : 0;
640 /* A - B == A + (-B). */
641 neg_double (l2
, h2
, &lv
, &hv
);
644 /* .. fall through ... */
647 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
651 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
654 case DIV
: case MOD
: case UDIV
: case UMOD
:
655 /* We'd need to include tree.h to do this and it doesn't seem worth
660 lv
= l1
& l2
, hv
= h1
& h2
;
664 lv
= l1
| l2
, hv
= h1
| h2
;
668 lv
= l1
^ l2
, hv
= h1
^ h2
;
674 && ((unsigned HOST_WIDE_INT
) l1
675 < (unsigned HOST_WIDE_INT
) l2
)))
684 && ((unsigned HOST_WIDE_INT
) l1
685 > (unsigned HOST_WIDE_INT
) l2
)))
692 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
694 && ((unsigned HOST_WIDE_INT
) l1
695 < (unsigned HOST_WIDE_INT
) l2
)))
702 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
704 && ((unsigned HOST_WIDE_INT
) l1
705 > (unsigned HOST_WIDE_INT
) l2
)))
711 case LSHIFTRT
: case ASHIFTRT
:
713 case ROTATE
: case ROTATERT
:
714 #ifdef SHIFT_COUNT_TRUNCATED
715 if (SHIFT_COUNT_TRUNCATED
)
716 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
719 if (h2
!= 0 || l2
< 0 || l2
>= GET_MODE_BITSIZE (mode
))
722 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
723 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
725 else if (code
== ASHIFT
)
726 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
727 else if (code
== ROTATE
)
728 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
729 else /* code == ROTATERT */
730 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
737 return immed_double_const (lv
, hv
, mode
);
740 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
741 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
743 /* Even if we can't compute a constant result,
744 there are some cases worth simplifying. */
749 /* In IEEE floating point, x+0 is not the same as x. Similarly
750 for the other optimizations below. */
751 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
752 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
755 if (op1
== CONST0_RTX (mode
))
758 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
759 if (GET_CODE (op0
) == NEG
)
760 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
761 else if (GET_CODE (op1
) == NEG
)
762 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
764 /* Handle both-operands-constant cases. We can only add
765 CONST_INTs to constants since the sum of relocatable symbols
766 can't be handled by most assemblers. Don't add CONST_INT
767 to CONST_INT since overflow won't be computed properly if wider
768 than HOST_BITS_PER_WIDE_INT. */
770 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
771 && GET_CODE (op1
) == CONST_INT
)
772 return plus_constant (op0
, INTVAL (op1
));
773 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
774 && GET_CODE (op0
) == CONST_INT
)
775 return plus_constant (op1
, INTVAL (op0
));
777 /* See if this is something like X * C - X or vice versa or
778 if the multiplication is written as a shift. If so, we can
779 distribute and make a new multiply, shift, or maybe just
780 have X (if C is 2 in the example above). But don't make
781 real multiply if we didn't have one before. */
783 if (! FLOAT_MODE_P (mode
))
785 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
786 rtx lhs
= op0
, rhs
= op1
;
789 if (GET_CODE (lhs
) == NEG
)
790 coeff0
= -1, lhs
= XEXP (lhs
, 0);
791 else if (GET_CODE (lhs
) == MULT
792 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
794 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
797 else if (GET_CODE (lhs
) == ASHIFT
798 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
799 && INTVAL (XEXP (lhs
, 1)) >= 0
800 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
802 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
806 if (GET_CODE (rhs
) == NEG
)
807 coeff1
= -1, rhs
= XEXP (rhs
, 0);
808 else if (GET_CODE (rhs
) == MULT
809 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
811 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
814 else if (GET_CODE (rhs
) == ASHIFT
815 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
816 && INTVAL (XEXP (rhs
, 1)) >= 0
817 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
819 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
823 if (rtx_equal_p (lhs
, rhs
))
825 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
826 GEN_INT (coeff0
+ coeff1
));
827 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
831 /* If one of the operands is a PLUS or a MINUS, see if we can
832 simplify this by the associative law.
833 Don't use the associative law for floating point.
834 The inaccuracy makes it nonassociative,
835 and subtle programs can break if operations are associated. */
837 if (INTEGRAL_MODE_P (mode
)
838 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
839 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
840 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
846 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
847 using cc0, in which case we want to leave it as a COMPARE
848 so we can distinguish it from a register-register-copy.
850 In IEEE floating point, x-0 is not the same as x. */
852 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
853 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
854 && op1
== CONST0_RTX (mode
))
857 /* Do nothing here. */
862 /* None of these optimizations can be done for IEEE
864 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
865 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
868 /* We can't assume x-x is 0 even with non-IEEE floating point,
869 but since it is zero except in very strange circumstances, we
870 will treat it as zero with -ffast-math. */
871 if (rtx_equal_p (op0
, op1
)
872 && ! side_effects_p (op0
)
873 && (! FLOAT_MODE_P (mode
) || flag_fast_math
))
874 return CONST0_RTX (mode
);
876 /* Change subtraction from zero into negation. */
877 if (op0
== CONST0_RTX (mode
))
878 return gen_rtx_NEG (mode
, op1
);
880 /* (-1 - a) is ~a. */
881 if (op0
== constm1_rtx
)
882 return gen_rtx_NOT (mode
, op1
);
884 /* Subtracting 0 has no effect. */
885 if (op1
== CONST0_RTX (mode
))
888 /* See if this is something like X * C - X or vice versa or
889 if the multiplication is written as a shift. If so, we can
890 distribute and make a new multiply, shift, or maybe just
891 have X (if C is 2 in the example above). But don't make
892 real multiply if we didn't have one before. */
894 if (! FLOAT_MODE_P (mode
))
896 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
897 rtx lhs
= op0
, rhs
= op1
;
900 if (GET_CODE (lhs
) == NEG
)
901 coeff0
= -1, lhs
= XEXP (lhs
, 0);
902 else if (GET_CODE (lhs
) == MULT
903 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
905 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
908 else if (GET_CODE (lhs
) == ASHIFT
909 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
910 && INTVAL (XEXP (lhs
, 1)) >= 0
911 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
913 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
917 if (GET_CODE (rhs
) == NEG
)
918 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
919 else if (GET_CODE (rhs
) == MULT
920 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
922 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
925 else if (GET_CODE (rhs
) == ASHIFT
926 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
927 && INTVAL (XEXP (rhs
, 1)) >= 0
928 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
930 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
934 if (rtx_equal_p (lhs
, rhs
))
936 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
937 GEN_INT (coeff0
- coeff1
));
938 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
942 /* (a - (-b)) -> (a + b). */
943 if (GET_CODE (op1
) == NEG
)
944 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
946 /* If one of the operands is a PLUS or a MINUS, see if we can
947 simplify this by the associative law.
948 Don't use the associative law for floating point.
949 The inaccuracy makes it nonassociative,
950 and subtle programs can break if operations are associated. */
952 if (INTEGRAL_MODE_P (mode
)
953 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
954 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
955 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
958 /* Don't let a relocatable value get a negative coeff. */
959 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
960 return plus_constant (op0
, - INTVAL (op1
));
962 /* (x - (x & y)) -> (x & ~y) */
963 if (GET_CODE (op1
) == AND
)
965 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
966 return simplify_gen_binary (AND
, mode
, op0
,
967 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
968 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
969 return simplify_gen_binary (AND
, mode
, op0
,
970 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
975 if (op1
== constm1_rtx
)
977 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
979 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
982 /* In IEEE floating point, x*0 is not always 0. */
983 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
984 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
985 && op1
== CONST0_RTX (mode
)
986 && ! side_effects_p (op0
))
989 /* In IEEE floating point, x*1 is not equivalent to x for nans.
990 However, ANSI says we can drop signals,
991 so we can do this anyway. */
992 if (op1
== CONST1_RTX (mode
))
995 /* Convert multiply by constant power of two into shift unless
996 we are still generating RTL. This test is a kludge. */
997 if (GET_CODE (op1
) == CONST_INT
998 && (val
= exact_log2 (INTVAL (op1
))) >= 0
999 /* If the mode is larger than the host word size, and the
1000 uppermost bit is set, then this isn't a power of two due
1001 to implicit sign extension. */
1002 && (width
<= HOST_BITS_PER_WIDE_INT
1003 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1004 && ! rtx_equal_function_value_matters
)
1005 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1007 if (GET_CODE (op1
) == CONST_DOUBLE
1008 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
)
1012 int op1is2
, op1ism1
;
1014 if (setjmp (handler
))
1017 set_float_handler (handler
);
1018 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1019 op1is2
= REAL_VALUES_EQUAL (d
, dconst2
);
1020 op1ism1
= REAL_VALUES_EQUAL (d
, dconstm1
);
1021 set_float_handler (NULL_PTR
);
1023 /* x*2 is x+x and x*(-1) is -x */
1024 if (op1is2
&& GET_MODE (op0
) == mode
)
1025 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1027 else if (op1ism1
&& GET_MODE (op0
) == mode
)
1028 return gen_rtx_NEG (mode
, op0
);
1033 if (op1
== const0_rtx
)
1035 if (GET_CODE (op1
) == CONST_INT
1036 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1038 if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1040 /* A | (~A) -> -1 */
1041 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1042 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1043 && ! side_effects_p (op0
)
1044 && GET_MODE_CLASS (mode
) != MODE_CC
)
1049 if (op1
== const0_rtx
)
1051 if (GET_CODE (op1
) == CONST_INT
1052 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1053 return gen_rtx_NOT (mode
, op0
);
1054 if (op0
== op1
&& ! side_effects_p (op0
)
1055 && GET_MODE_CLASS (mode
) != MODE_CC
)
1060 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1062 if (GET_CODE (op1
) == CONST_INT
1063 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1065 if (op0
== op1
&& ! side_effects_p (op0
)
1066 && GET_MODE_CLASS (mode
) != MODE_CC
)
1069 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1070 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1071 && ! side_effects_p (op0
)
1072 && GET_MODE_CLASS (mode
) != MODE_CC
)
1077 /* Convert divide by power of two into shift (divide by 1 handled
1079 if (GET_CODE (op1
) == CONST_INT
1080 && (arg1
= exact_log2 (INTVAL (op1
))) > 0)
1081 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1083 /* ... fall through ... */
1086 if (op1
== CONST1_RTX (mode
))
1089 /* In IEEE floating point, 0/x is not always 0. */
1090 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1091 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1092 && op0
== CONST0_RTX (mode
)
1093 && ! side_effects_p (op1
))
1096 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1097 /* Change division by a constant into multiplication. Only do
1098 this with -ffast-math until an expert says it is safe in
1100 else if (GET_CODE (op1
) == CONST_DOUBLE
1101 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
1102 && op1
!= CONST0_RTX (mode
)
1106 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1108 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1110 #if defined (REAL_ARITHMETIC)
1111 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1112 return gen_rtx_MULT (mode
, op0
,
1113 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1116 gen_rtx_MULT (mode
, op0
,
1117 CONST_DOUBLE_FROM_REAL_VALUE (1./d
, mode
));
1125 /* Handle modulus by power of two (mod with 1 handled below). */
1126 if (GET_CODE (op1
) == CONST_INT
1127 && exact_log2 (INTVAL (op1
)) > 0)
1128 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1130 /* ... fall through ... */
1133 if ((op0
== const0_rtx
|| op1
== const1_rtx
)
1134 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1140 /* Rotating ~0 always results in ~0. */
1141 if (GET_CODE (op0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1142 && (unsigned HOST_WIDE_INT
) INTVAL (op0
) == GET_MODE_MASK (mode
)
1143 && ! side_effects_p (op1
))
1146 /* ... fall through ... */
1151 if (op1
== const0_rtx
)
1153 if (op0
== const0_rtx
&& ! side_effects_p (op1
))
1158 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1159 && INTVAL (op1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1160 && ! side_effects_p (op0
))
1162 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1167 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1168 && ((unsigned HOST_WIDE_INT
) INTVAL (op1
)
1169 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1170 && ! side_effects_p (op0
))
1172 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1177 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1179 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1184 if (op1
== constm1_rtx
&& ! side_effects_p (op0
))
1186 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1197 /* Get the integer argument values in two forms:
1198 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1200 arg0
= INTVAL (op0
);
1201 arg1
= INTVAL (op1
);
1203 if (width
< HOST_BITS_PER_WIDE_INT
)
1205 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1206 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1209 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1210 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1213 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1214 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1222 /* Compute the value of the arithmetic. */
1227 val
= arg0s
+ arg1s
;
1231 val
= arg0s
- arg1s
;
1235 val
= arg0s
* arg1s
;
1241 val
= arg0s
/ arg1s
;
1247 val
= arg0s
% arg1s
;
1253 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1259 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1275 /* If shift count is undefined, don't fold it; let the machine do
1276 what it wants. But truncate it if the machine will do that. */
1280 #ifdef SHIFT_COUNT_TRUNCATED
1281 if (SHIFT_COUNT_TRUNCATED
)
1285 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1292 #ifdef SHIFT_COUNT_TRUNCATED
1293 if (SHIFT_COUNT_TRUNCATED
)
1297 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1304 #ifdef SHIFT_COUNT_TRUNCATED
1305 if (SHIFT_COUNT_TRUNCATED
)
1309 val
= arg0s
>> arg1
;
1311 /* Bootstrap compiler may not have sign extended the right shift.
1312 Manually extend the sign to insure bootstrap cc matches gcc. */
1313 if (arg0s
< 0 && arg1
> 0)
1314 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1323 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1324 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1332 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1333 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1337 /* Do nothing here. */
1341 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1345 val
= ((unsigned HOST_WIDE_INT
) arg0
1346 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1350 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1354 val
= ((unsigned HOST_WIDE_INT
) arg0
1355 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1362 val
= trunc_int_for_mode (val
, mode
);
1364 return GEN_INT (val
);
1367 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1370 Rather than test for specific case, we do this by a brute-force method
1371 and do all possible simplifications until no more changes occur. Then
1372 we rebuild the operation. */
1375 simplify_plus_minus (code
, mode
, op0
, op1
)
1377 enum machine_mode mode
;
1383 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
= 0;
1384 int first
= 1, negate
= 0, changed
;
1387 bzero ((char *) ops
, sizeof ops
);
1389 /* Set up the two operands and then expand them until nothing has been
1390 changed. If we run out of room in our array, give up; this should
1391 almost never happen. */
1393 ops
[0] = op0
, ops
[1] = op1
, negs
[0] = 0, negs
[1] = (code
== MINUS
);
1400 for (i
= 0; i
< n_ops
; i
++)
1401 switch (GET_CODE (ops
[i
]))
1408 ops
[n_ops
] = XEXP (ops
[i
], 1);
1409 negs
[n_ops
++] = GET_CODE (ops
[i
]) == MINUS
? !negs
[i
] : negs
[i
];
1410 ops
[i
] = XEXP (ops
[i
], 0);
1416 ops
[i
] = XEXP (ops
[i
], 0);
1417 negs
[i
] = ! negs
[i
];
1422 ops
[i
] = XEXP (ops
[i
], 0);
1428 /* ~a -> (-a - 1) */
1431 ops
[n_ops
] = constm1_rtx
;
1432 negs
[n_ops
++] = negs
[i
];
1433 ops
[i
] = XEXP (ops
[i
], 0);
1434 negs
[i
] = ! negs
[i
];
1441 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0, changed
= 1;
1449 /* If we only have two operands, we can't do anything. */
1453 /* Now simplify each pair of operands until nothing changes. The first
1454 time through just simplify constants against each other. */
1461 for (i
= 0; i
< n_ops
- 1; i
++)
1462 for (j
= i
+ 1; j
< n_ops
; j
++)
1463 if (ops
[i
] != 0 && ops
[j
] != 0
1464 && (! first
|| (CONSTANT_P (ops
[i
]) && CONSTANT_P (ops
[j
]))))
1466 rtx lhs
= ops
[i
], rhs
= ops
[j
];
1467 enum rtx_code ncode
= PLUS
;
1469 if (negs
[i
] && ! negs
[j
])
1470 lhs
= ops
[j
], rhs
= ops
[i
], ncode
= MINUS
;
1471 else if (! negs
[i
] && negs
[j
])
1474 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
1477 ops
[i
] = tem
, ops
[j
] = 0;
1478 negs
[i
] = negs
[i
] && negs
[j
];
1479 if (GET_CODE (tem
) == NEG
)
1480 ops
[i
] = XEXP (tem
, 0), negs
[i
] = ! negs
[i
];
1482 if (GET_CODE (ops
[i
]) == CONST_INT
&& negs
[i
])
1483 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0;
1491 /* Pack all the operands to the lower-numbered entries and give up if
1492 we didn't reduce the number of operands we had. Make sure we
1493 count a CONST as two operands. If we have the same number of
1494 operands, but have made more CONSTs than we had, this is also
1495 an improvement, so accept it. */
1497 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1500 ops
[i
] = ops
[j
], negs
[i
++] = negs
[j
];
1501 if (GET_CODE (ops
[j
]) == CONST
)
1505 if (i
+ n_consts
> input_ops
1506 || (i
+ n_consts
== input_ops
&& n_consts
<= input_consts
))
1511 /* If we have a CONST_INT, put it last. */
1512 for (i
= 0; i
< n_ops
- 1; i
++)
1513 if (GET_CODE (ops
[i
]) == CONST_INT
)
1515 tem
= ops
[n_ops
- 1], ops
[n_ops
- 1] = ops
[i
] , ops
[i
] = tem
;
1516 j
= negs
[n_ops
- 1], negs
[n_ops
- 1] = negs
[i
], negs
[i
] = j
;
1519 /* Put a non-negated operand first. If there aren't any, make all
1520 operands positive and negate the whole thing later. */
1521 for (i
= 0; i
< n_ops
&& negs
[i
]; i
++)
1526 for (i
= 0; i
< n_ops
; i
++)
1532 tem
= ops
[0], ops
[0] = ops
[i
], ops
[i
] = tem
;
1533 j
= negs
[0], negs
[0] = negs
[i
], negs
[i
] = j
;
1536 /* Now make the result by performing the requested operations. */
1538 for (i
= 1; i
< n_ops
; i
++)
1539 result
= simplify_gen_binary (negs
[i
] ? MINUS
: PLUS
, mode
, result
, ops
[i
]);
1541 return negate
? gen_rtx_NEG (mode
, result
) : result
;
1546 rtx op0
, op1
; /* Input */
1547 int equal
, op0lt
, op1lt
; /* Output */
1551 check_fold_consts (data
)
1554 struct cfc_args
*args
= (struct cfc_args
*) data
;
1555 REAL_VALUE_TYPE d0
, d1
;
1557 REAL_VALUE_FROM_CONST_DOUBLE (d0
, args
->op0
);
1558 REAL_VALUE_FROM_CONST_DOUBLE (d1
, args
->op1
);
1559 args
->equal
= REAL_VALUES_EQUAL (d0
, d1
);
1560 args
->op0lt
= REAL_VALUES_LESS (d0
, d1
);
1561 args
->op1lt
= REAL_VALUES_LESS (d1
, d0
);
1564 /* Like simplify_binary_operation except used for relational operators.
1565 MODE is the mode of the operands, not that of the result. If MODE
1566 is VOIDmode, both operands must also be VOIDmode and we compare the
1567 operands in "infinite precision".
1569 If no simplification is possible, this function returns zero. Otherwise,
1570 it returns either const_true_rtx or const0_rtx. */
1573 simplify_relational_operation (code
, mode
, op0
, op1
)
1575 enum machine_mode mode
;
1578 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
1581 /* If op0 is a compare, extract the comparison arguments from it. */
1582 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
1583 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
1585 /* We can't simplify MODE_CC values since we don't know what the
1586 actual comparison is. */
1587 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
1594 /* For integer comparisons of A and B maybe we can simplify A - B and can
1595 then simplify a comparison of that with zero. If A and B are both either
1596 a register or a CONST_INT, this can't help; testing for these cases will
1597 prevent infinite recursion here and speed things up.
1599 If CODE is an unsigned comparison, then we can never do this optimization,
1600 because it gives an incorrect result if the subtraction wraps around zero.
1601 ANSI C defines unsigned operations such that they never overflow, and
1602 thus such cases can not be ignored. */
1604 if (INTEGRAL_MODE_P (mode
) && op1
!= const0_rtx
1605 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (op0
) == CONST_INT
)
1606 && (GET_CODE (op1
) == REG
|| GET_CODE (op1
) == CONST_INT
))
1607 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
1608 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
1609 return simplify_relational_operation (signed_condition (code
),
1610 mode
, tem
, const0_rtx
);
1612 /* For non-IEEE floating-point, if the two operands are equal, we know the
1614 if (rtx_equal_p (op0
, op1
)
1615 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1616 || ! FLOAT_MODE_P (GET_MODE (op0
)) || flag_fast_math
))
1617 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
1619 /* If the operands are floating-point constants, see if we can fold
1621 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1622 else if (GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
1623 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_FLOAT
)
1625 struct cfc_args args
;
1627 /* Setup input for check_fold_consts() */
1631 if (do_float_handler(check_fold_consts
, (PTR
) &args
) == 0)
1632 /* We got an exception from check_fold_consts() */
1635 /* Receive output from check_fold_consts() */
1637 op0lt
= op0ltu
= args
.op0lt
;
1638 op1lt
= op1ltu
= args
.op1lt
;
1640 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1642 /* Otherwise, see if the operands are both integers. */
1643 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
1644 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
1645 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
1647 int width
= GET_MODE_BITSIZE (mode
);
1648 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
1649 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
1651 /* Get the two words comprising each integer constant. */
1652 if (GET_CODE (op0
) == CONST_DOUBLE
)
1654 l0u
= l0s
= CONST_DOUBLE_LOW (op0
);
1655 h0u
= h0s
= CONST_DOUBLE_HIGH (op0
);
1659 l0u
= l0s
= INTVAL (op0
);
1660 h0u
= h0s
= l0s
< 0 ? -1 : 0;
1663 if (GET_CODE (op1
) == CONST_DOUBLE
)
1665 l1u
= l1s
= CONST_DOUBLE_LOW (op1
);
1666 h1u
= h1s
= CONST_DOUBLE_HIGH (op1
);
1670 l1u
= l1s
= INTVAL (op1
);
1671 h1u
= h1s
= l1s
< 0 ? -1 : 0;
1674 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1675 we have to sign or zero-extend the values. */
1676 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
1677 h0u
= h1u
= 0, h0s
= l0s
< 0 ? -1 : 0, h1s
= l1s
< 0 ? -1 : 0;
1679 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
1681 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1682 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1684 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1685 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1687 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1688 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1691 equal
= (h0u
== h1u
&& l0u
== l1u
);
1692 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0s
< l1s
));
1693 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1s
< l0s
));
1694 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
1695 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
1698 /* Otherwise, there are some code-specific tests we can make. */
1704 /* References to the frame plus a constant or labels cannot
1705 be zero, but a SYMBOL_REF can due to #pragma weak. */
1706 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1707 || GET_CODE (op0
) == LABEL_REF
)
1708 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1709 /* On some machines, the ap reg can be 0 sometimes. */
1710 && op0
!= arg_pointer_rtx
1717 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1718 || GET_CODE (op0
) == LABEL_REF
)
1719 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1720 && op0
!= arg_pointer_rtx
1723 return const_true_rtx
;
1727 /* Unsigned values are never negative. */
1728 if (op1
== const0_rtx
)
1729 return const_true_rtx
;
1733 if (op1
== const0_rtx
)
1738 /* Unsigned values are never greater than the largest
1740 if (GET_CODE (op1
) == CONST_INT
1741 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1742 && INTEGRAL_MODE_P (mode
))
1743 return const_true_rtx
;
1747 if (GET_CODE (op1
) == CONST_INT
1748 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1749 && INTEGRAL_MODE_P (mode
))
1760 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1765 return equal
? const_true_rtx
: const0_rtx
;
1767 return ! equal
? const_true_rtx
: const0_rtx
;
1769 return op0lt
? const_true_rtx
: const0_rtx
;
1771 return op1lt
? const_true_rtx
: const0_rtx
;
1773 return op0ltu
? const_true_rtx
: const0_rtx
;
1775 return op1ltu
? const_true_rtx
: const0_rtx
;
1777 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
1779 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
1781 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
1783 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
1789 /* Simplify CODE, an operation with result mode MODE and three operands,
1790 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1791 a constant. Return 0 if no simplifications is possible. */
1794 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
1796 enum machine_mode mode
, op0_mode
;
1799 int width
= GET_MODE_BITSIZE (mode
);
1801 /* VOIDmode means "infinite" precision. */
1803 width
= HOST_BITS_PER_WIDE_INT
;
1809 if (GET_CODE (op0
) == CONST_INT
1810 && GET_CODE (op1
) == CONST_INT
1811 && GET_CODE (op2
) == CONST_INT
1812 && INTVAL (op1
) + INTVAL (op2
) <= GET_MODE_BITSIZE (op0_mode
)
1813 && width
<= HOST_BITS_PER_WIDE_INT
)
1815 /* Extracting a bit-field from a constant */
1816 HOST_WIDE_INT val
= INTVAL (op0
);
1818 if (BITS_BIG_ENDIAN
)
1819 val
>>= (GET_MODE_BITSIZE (op0_mode
)
1820 - INTVAL (op2
) - INTVAL (op1
));
1822 val
>>= INTVAL (op2
);
1824 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
1826 /* First zero-extend. */
1827 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
1828 /* If desired, propagate sign bit. */
1829 if (code
== SIGN_EXTRACT
1830 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
1831 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
1834 /* Clear the bits that don't belong in our mode,
1835 unless they and our sign bit are all one.
1836 So we get either a reasonable negative value or a reasonable
1837 unsigned value for this mode. */
1838 if (width
< HOST_BITS_PER_WIDE_INT
1839 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
1840 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
1841 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1843 return GEN_INT (val
);
1848 if (GET_CODE (op0
) == CONST_INT
)
1849 return op0
!= const0_rtx
? op1
: op2
;
1851 /* Convert a == b ? b : a to "a". */
1852 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
1853 && rtx_equal_p (XEXP (op0
, 0), op1
)
1854 && rtx_equal_p (XEXP (op0
, 1), op2
))
1856 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
1857 && rtx_equal_p (XEXP (op0
, 1), op1
)
1858 && rtx_equal_p (XEXP (op0
, 0), op2
))
1860 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
1863 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
1864 XEXP (op0
, 0), XEXP (op0
, 1));
1865 /* See if any simplifications were possible. */
1866 if (temp
== const0_rtx
)
1868 else if (temp
== const1_rtx
)
1880 /* Simplify X, an rtx expression.
1882 Return the simplified expression or NULL if no simplifications
1885 This is the preferred entry point into the simplification routines;
1886 however, we still allow passes to call the more specific routines.
1888 Right now GCC has three (yes, three) major bodies of RTL simplficiation
1889 code that need to be unified.
1891 1. fold_rtx in cse.c. This code uses various CSE specific
1892 information to aid in RTL simplification.
1894 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
1895 it uses combine specific information to aid in RTL
1898 3. The routines in this file.
1901 Long term we want to only have one body of simplification code; to
1902 get to that state I recommend the following steps:
1904 1. Pour over fold_rtx & simplify_rtx and move any simplifications
1905 which are not pass dependent state into these routines.
1907 2. As code is moved by #1, change fold_rtx & simplify_rtx to
1908 use this routine whenever possible.
1910 3. Allow for pass dependent state to be provided to these
1911 routines and add simplifications based on the pass dependent
1912 state. Remove code from cse.c & combine.c that becomes
1915 It will take time, but ultimately the compiler will be easier to
1916 maintain and improve. It's totally silly that when we add a
1917 simplification that it needs to be added to 4 places (3 for RTL
1918 simplification and 1 for tree simplification. */
1925 enum machine_mode mode
;
1927 mode
= GET_MODE (x
);
1928 code
= GET_CODE (x
);
1930 switch (GET_RTX_CLASS (code
))
1933 return simplify_unary_operation (code
, mode
,
1934 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
1937 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
1941 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
1942 XEXP (x
, 0), XEXP (x
, 1), XEXP (x
, 2));
1945 return simplify_relational_operation (code
, GET_MODE (XEXP (x
, 0)),
1946 XEXP (x
, 0), XEXP (x
, 1));