1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
29 #include "hard-reg-set.h"
32 #include "insn-config.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with the
48 simplification routines in simplify-rtx.c. Until then, do not
49 change these macros without also changing the copy in simplify-rtx.c. */
/* Nonzero if X is a fixed base (frame/arg/virtual register) or such a
   base plus a CONST_INT offset.  NOTE(review): inside the PLUS arm the
   original tested `(X) == arg_pointer_rtx', which can never be true
   there because X is known to be a PLUS; the intended operand is
   XEXP (X, 0).  Keep the copy of this macro in cse.c in sync.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
/* Similar to FIXED_BASE_PLUS_P, but also allows reference to the stack
   pointer and the dynamic/outgoing virtual registers.  This deliberately
   does not accept a bare arg_pointer_rtx: on at least one machine (the
   i960) the arg pointer is zero when unused.
   NOTE(review): as in FIXED_BASE_PLUS_P, the test `(X) == arg_pointer_rtx'
   inside the PLUS arm was dead code (X is a PLUS there); corrected to
   XEXP (X, 0).  Keep the copy of this macro in cse.c in sync.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
/* Sign-extend the low word LOW into a high word: -1 if the value is
   negative when viewed as a signed HOST_WIDE_INT, else 0.
   NOTE(review): the argument is now parenthesized in the expansion;
   previously `(HOST_WIDE_INT) low' would bind the cast to only the
   first operand of a compound argument.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
98 static rtx neg_const_int
PARAMS ((enum machine_mode
, rtx
));
99 static int simplify_plus_minus_op_data_cmp
PARAMS ((const void *,
101 static rtx simplify_plus_minus
PARAMS ((enum rtx_code
,
102 enum machine_mode
, rtx
,
105 /* Negate a CONST_INT rtx, truncating (because a conversion from a
106 maximally negative number can overflow). */
108 neg_const_int (mode
, i
)
109 enum machine_mode mode
;
112 return gen_int_mode (- INTVAL (i
), mode
);
116 /* Make a binary operation by properly ordering the operands and
117 seeing if the expression folds. */
120 simplify_gen_binary (code
, mode
, op0
, op1
)
122 enum machine_mode mode
;
127 /* Put complex operands first and constants second if commutative. */
128 if (GET_RTX_CLASS (code
) == 'c'
129 && swap_commutative_operands_p (op0
, op1
))
130 tem
= op0
, op0
= op1
, op1
= tem
;
132 /* If this simplifies, do it. */
133 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
137 /* Handle addition and subtraction specially. Otherwise, just form
140 if (code
== PLUS
|| code
== MINUS
)
142 tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 1);
147 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
150 /* If X is a MEM referencing the constant pool, return the real value.
151 Otherwise return X. */
153 avoid_constant_pool_reference (x
)
157 enum machine_mode cmode
;
159 if (GET_CODE (x
) != MEM
)
163 if (GET_CODE (addr
) != SYMBOL_REF
164 || ! CONSTANT_POOL_ADDRESS_P (addr
))
167 c
= get_pool_constant (addr
);
168 cmode
= get_pool_mode (addr
);
170 /* If we're accessing the constant in a different mode than it was
171 originally stored, attempt to fix that up via subreg simplifications.
172 If that fails we have no choice but to return the original memory. */
173 if (cmode
!= GET_MODE (x
))
175 c
= simplify_subreg (GET_MODE (x
), c
, cmode
, 0);
182 /* Make a unary operation by first seeing if it folds and otherwise making
183 the specified operation. */
186 simplify_gen_unary (code
, mode
, op
, op_mode
)
188 enum machine_mode mode
;
190 enum machine_mode op_mode
;
194 /* If this simplifies, use it. */
195 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
198 return gen_rtx_fmt_e (code
, mode
, op
);
201 /* Likewise for ternary operations. */
204 simplify_gen_ternary (code
, mode
, op0_mode
, op0
, op1
, op2
)
206 enum machine_mode mode
, op0_mode
;
211 /* If this simplifies, use it. */
212 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
216 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
219 /* Likewise, for relational operations.
220 CMP_MODE specifies mode comparison is done in.
224 simplify_gen_relational (code
, mode
, cmp_mode
, op0
, op1
)
226 enum machine_mode mode
;
227 enum machine_mode cmp_mode
;
232 if ((tem
= simplify_relational_operation (code
, cmp_mode
, op0
, op1
)) != 0)
235 /* If op0 is a compare, extract the comparison arguments from it. */
236 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
237 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
239 /* Put complex operands first and constants second. */
240 if (swap_commutative_operands_p (op0
, op1
))
241 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
243 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
246 /* Replace all occurrences of OLD in X with NEW and try to simplify the
247 resulting RTX. Return a new RTX which is as simplified as possible. */
250 simplify_replace_rtx (x
, old
, new)
255 enum rtx_code code
= GET_CODE (x
);
256 enum machine_mode mode
= GET_MODE (x
);
258 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
259 to build a new expression substituting recursively. If we can't do
260 anything, return our input. */
265 switch (GET_RTX_CLASS (code
))
269 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
270 rtx op
= (XEXP (x
, 0) == old
271 ? new : simplify_replace_rtx (XEXP (x
, 0), old
, new));
273 return simplify_gen_unary (code
, mode
, op
, op_mode
);
279 simplify_gen_binary (code
, mode
,
280 simplify_replace_rtx (XEXP (x
, 0), old
, new),
281 simplify_replace_rtx (XEXP (x
, 1), old
, new));
284 enum machine_mode op_mode
= (GET_MODE (XEXP (x
, 0)) != VOIDmode
285 ? GET_MODE (XEXP (x
, 0))
286 : GET_MODE (XEXP (x
, 1)));
287 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
288 rtx op1
= simplify_replace_rtx (XEXP (x
, 1), old
, new);
291 simplify_gen_relational (code
, mode
,
294 : GET_MODE (op0
) != VOIDmode
303 enum machine_mode op_mode
= GET_MODE (XEXP (x
, 0));
304 rtx op0
= simplify_replace_rtx (XEXP (x
, 0), old
, new);
307 simplify_gen_ternary (code
, mode
,
312 simplify_replace_rtx (XEXP (x
, 1), old
, new),
313 simplify_replace_rtx (XEXP (x
, 2), old
, new));
317 /* The only case we try to handle is a SUBREG. */
321 exp
= simplify_gen_subreg (GET_MODE (x
),
322 simplify_replace_rtx (SUBREG_REG (x
),
324 GET_MODE (SUBREG_REG (x
)),
332 if (GET_CODE (x
) == MEM
)
334 replace_equiv_address_nv (x
,
335 simplify_replace_rtx (XEXP (x
, 0),
343 /* Try to simplify a unary operation CODE whose output mode is to be
344 MODE with input operand OP whose mode was originally OP_MODE.
345 Return zero if no simplification can be made. */
347 simplify_unary_operation (code
, mode
, op
, op_mode
)
349 enum machine_mode mode
;
351 enum machine_mode op_mode
;
353 unsigned int width
= GET_MODE_BITSIZE (mode
);
354 rtx trueop
= avoid_constant_pool_reference (op
);
356 /* The order of these tests is critical so that, for example, we don't
357 check the wrong mode (input vs. output) for a conversion operation,
358 such as FIX. At some point, this should be simplified. */
360 if (code
== FLOAT
&& GET_MODE (trueop
) == VOIDmode
361 && (GET_CODE (trueop
) == CONST_DOUBLE
|| GET_CODE (trueop
) == CONST_INT
))
363 HOST_WIDE_INT hv
, lv
;
366 if (GET_CODE (trueop
) == CONST_INT
)
367 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
369 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
371 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
372 d
= real_value_truncate (mode
, d
);
373 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
375 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (trueop
) == VOIDmode
376 && (GET_CODE (trueop
) == CONST_DOUBLE
377 || GET_CODE (trueop
) == CONST_INT
))
379 HOST_WIDE_INT hv
, lv
;
382 if (GET_CODE (trueop
) == CONST_INT
)
383 lv
= INTVAL (trueop
), hv
= HWI_SIGN_EXTEND (lv
);
385 lv
= CONST_DOUBLE_LOW (trueop
), hv
= CONST_DOUBLE_HIGH (trueop
);
387 if (op_mode
== VOIDmode
)
389 /* We don't know how to interpret negative-looking numbers in
390 this case, so don't try to fold those. */
394 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
397 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
399 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
400 d
= real_value_truncate (mode
, d
);
401 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
404 if (GET_CODE (trueop
) == CONST_INT
405 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
407 HOST_WIDE_INT arg0
= INTVAL (trueop
);
421 val
= (arg0
>= 0 ? arg0
: - arg0
);
425 /* Don't use ffs here. Instead, get low order bit and then its
426 number. If arg0 is zero, this will return 0, as desired. */
427 arg0
&= GET_MODE_MASK (mode
);
428 val
= exact_log2 (arg0
& (- arg0
)) + 1;
436 /* When zero-extending a CONST_INT, we need to know its
438 if (op_mode
== VOIDmode
)
440 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
442 /* If we were really extending the mode,
443 we would have to distinguish between zero-extension
444 and sign-extension. */
445 if (width
!= GET_MODE_BITSIZE (op_mode
))
449 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
450 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
456 if (op_mode
== VOIDmode
)
458 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
460 /* If we were really extending the mode,
461 we would have to distinguish between zero-extension
462 and sign-extension. */
463 if (width
!= GET_MODE_BITSIZE (op_mode
))
467 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
470 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
472 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
473 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
490 val
= trunc_int_for_mode (val
, mode
);
492 return GEN_INT (val
);
495 /* We can do some operations on integer CONST_DOUBLEs. Also allow
496 for a DImode operation on a CONST_INT. */
497 else if (GET_MODE (trueop
) == VOIDmode
498 && width
<= HOST_BITS_PER_WIDE_INT
* 2
499 && (GET_CODE (trueop
) == CONST_DOUBLE
500 || GET_CODE (trueop
) == CONST_INT
))
502 unsigned HOST_WIDE_INT l1
, lv
;
503 HOST_WIDE_INT h1
, hv
;
505 if (GET_CODE (trueop
) == CONST_DOUBLE
)
506 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
508 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
518 neg_double (l1
, h1
, &lv
, &hv
);
523 neg_double (l1
, h1
, &lv
, &hv
);
531 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
533 lv
= exact_log2 (l1
& (-l1
)) + 1;
537 /* This is just a change-of-mode, so do nothing. */
542 if (op_mode
== VOIDmode
)
545 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
549 lv
= l1
& GET_MODE_MASK (op_mode
);
553 if (op_mode
== VOIDmode
554 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
558 lv
= l1
& GET_MODE_MASK (op_mode
);
559 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
560 && (lv
& ((HOST_WIDE_INT
) 1
561 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
562 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
564 hv
= HWI_SIGN_EXTEND (lv
);
575 return immed_double_const (lv
, hv
, mode
);
578 else if (GET_CODE (trueop
) == CONST_DOUBLE
579 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
582 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
587 /* We don't attempt to optimize this. */
590 case ABS
: d
= REAL_VALUE_ABS (d
); break;
591 case NEG
: d
= REAL_VALUE_NEGATE (d
); break;
592 case FLOAT_TRUNCATE
: d
= real_value_truncate (mode
, d
); break;
593 case FLOAT_EXTEND
: /* All this does is change the mode. */ break;
594 case FIX
: d
= REAL_VALUE_RNDZINT (d
); break;
595 case UNSIGNED_FIX
: d
= REAL_VALUE_UNSIGNED_RNDZINT (d
); break;
599 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
602 else if (GET_CODE (trueop
) == CONST_DOUBLE
603 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
604 && GET_MODE_CLASS (mode
) == MODE_INT
605 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
609 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
612 case FIX
: i
= REAL_VALUE_FIX (d
); break;
613 case UNSIGNED_FIX
: i
= REAL_VALUE_UNSIGNED_FIX (d
); break;
617 return gen_int_mode (i
, mode
);
620 /* This was formerly used only for non-IEEE float.
621 eggert@twinsun.com says it is safe for IEEE also. */
624 enum rtx_code reversed
;
625 /* There are some simplifications we can do even if the operands
630 /* (not (not X)) == X. */
631 if (GET_CODE (op
) == NOT
)
634 /* (not (eq X Y)) == (ne X Y), etc. */
635 if (mode
== BImode
&& GET_RTX_CLASS (GET_CODE (op
)) == '<'
636 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
638 return gen_rtx_fmt_ee (reversed
,
639 op_mode
, XEXP (op
, 0), XEXP (op
, 1));
643 /* (neg (neg X)) == X. */
644 if (GET_CODE (op
) == NEG
)
649 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
650 becomes just the MINUS if its mode is MODE. This allows
651 folding switch statements on machines using casesi (such as
653 if (GET_CODE (op
) == TRUNCATE
654 && GET_MODE (XEXP (op
, 0)) == mode
655 && GET_CODE (XEXP (op
, 0)) == MINUS
656 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
657 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
660 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
661 if (! POINTERS_EXTEND_UNSIGNED
662 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
664 || (GET_CODE (op
) == SUBREG
665 && GET_CODE (SUBREG_REG (op
)) == REG
666 && REG_POINTER (SUBREG_REG (op
))
667 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
668 return convert_memory_address (Pmode
, op
);
672 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
674 if (POINTERS_EXTEND_UNSIGNED
> 0
675 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
677 || (GET_CODE (op
) == SUBREG
678 && GET_CODE (SUBREG_REG (op
)) == REG
679 && REG_POINTER (SUBREG_REG (op
))
680 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
681 return convert_memory_address (Pmode
, op
);
693 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
694 and OP1. Return 0 if no simplification is possible.
696 Don't use this for relational operations such as EQ or LT.
697 Use simplify_relational_operation instead. */
699 simplify_binary_operation (code
, mode
, op0
, op1
)
701 enum machine_mode mode
;
704 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
706 unsigned int width
= GET_MODE_BITSIZE (mode
);
708 rtx trueop0
= avoid_constant_pool_reference (op0
);
709 rtx trueop1
= avoid_constant_pool_reference (op1
);
711 /* Relational operations don't work here. We must know the mode
712 of the operands in order to do the comparison correctly.
713 Assuming a full word can give incorrect results.
714 Consider comparing 128 with -128 in QImode. */
716 if (GET_RTX_CLASS (code
) == '<')
719 /* Make sure the constant is second. */
720 if (GET_RTX_CLASS (code
) == 'c'
721 && swap_commutative_operands_p (trueop0
, trueop1
))
723 tem
= op0
, op0
= op1
, op1
= tem
;
724 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
727 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
728 && GET_CODE (trueop0
) == CONST_DOUBLE
729 && GET_CODE (trueop1
) == CONST_DOUBLE
730 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
732 REAL_VALUE_TYPE f0
, f1
, value
;
734 REAL_VALUE_FROM_CONST_DOUBLE (f0
, trueop0
);
735 REAL_VALUE_FROM_CONST_DOUBLE (f1
, trueop1
);
736 f0
= real_value_truncate (mode
, f0
);
737 f1
= real_value_truncate (mode
, f1
);
740 && !MODE_HAS_INFINITIES (mode
)
741 && REAL_VALUES_EQUAL (f1
, dconst0
))
744 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
746 value
= real_value_truncate (mode
, value
);
747 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
750 /* We can fold some multi-word operations. */
751 if (GET_MODE_CLASS (mode
) == MODE_INT
752 && width
== HOST_BITS_PER_WIDE_INT
* 2
753 && (GET_CODE (trueop0
) == CONST_DOUBLE
754 || GET_CODE (trueop0
) == CONST_INT
)
755 && (GET_CODE (trueop1
) == CONST_DOUBLE
756 || GET_CODE (trueop1
) == CONST_INT
))
758 unsigned HOST_WIDE_INT l1
, l2
, lv
;
759 HOST_WIDE_INT h1
, h2
, hv
;
761 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
762 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
764 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
766 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
767 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
769 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
774 /* A - B == A + (-B). */
775 neg_double (l2
, h2
, &lv
, &hv
);
778 /* .. fall through ... */
781 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
785 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
788 case DIV
: case MOD
: case UDIV
: case UMOD
:
789 /* We'd need to include tree.h to do this and it doesn't seem worth
794 lv
= l1
& l2
, hv
= h1
& h2
;
798 lv
= l1
| l2
, hv
= h1
| h2
;
802 lv
= l1
^ l2
, hv
= h1
^ h2
;
808 && ((unsigned HOST_WIDE_INT
) l1
809 < (unsigned HOST_WIDE_INT
) l2
)))
818 && ((unsigned HOST_WIDE_INT
) l1
819 > (unsigned HOST_WIDE_INT
) l2
)))
826 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
828 && ((unsigned HOST_WIDE_INT
) l1
829 < (unsigned HOST_WIDE_INT
) l2
)))
836 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
838 && ((unsigned HOST_WIDE_INT
) l1
839 > (unsigned HOST_WIDE_INT
) l2
)))
845 case LSHIFTRT
: case ASHIFTRT
:
847 case ROTATE
: case ROTATERT
:
848 #ifdef SHIFT_COUNT_TRUNCATED
849 if (SHIFT_COUNT_TRUNCATED
)
850 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
853 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
856 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
857 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
859 else if (code
== ASHIFT
)
860 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
861 else if (code
== ROTATE
)
862 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
863 else /* code == ROTATERT */
864 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
871 return immed_double_const (lv
, hv
, mode
);
874 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
875 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
877 /* Even if we can't compute a constant result,
878 there are some cases worth simplifying. */
883 /* Maybe simplify x + 0 to x. The two expressions are equivalent
884 when x is NaN, infinite, or finite and non-zero. They aren't
885 when x is -0 and the rounding mode is not towards -infinity,
886 since (-0) + 0 is then 0. */
887 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
890 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
891 transformations are safe even for IEEE. */
892 if (GET_CODE (op0
) == NEG
)
893 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
894 else if (GET_CODE (op1
) == NEG
)
895 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
898 if (INTEGRAL_MODE_P (mode
)
899 && GET_CODE (op0
) == NOT
900 && trueop1
== const1_rtx
)
901 return gen_rtx_NEG (mode
, XEXP (op0
, 0));
903 /* Handle both-operands-constant cases. We can only add
904 CONST_INTs to constants since the sum of relocatable symbols
905 can't be handled by most assemblers. Don't add CONST_INT
906 to CONST_INT since overflow won't be computed properly if wider
907 than HOST_BITS_PER_WIDE_INT. */
909 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
910 && GET_CODE (op1
) == CONST_INT
)
911 return plus_constant (op0
, INTVAL (op1
));
912 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
913 && GET_CODE (op0
) == CONST_INT
)
914 return plus_constant (op1
, INTVAL (op0
));
916 /* See if this is something like X * C - X or vice versa or
917 if the multiplication is written as a shift. If so, we can
918 distribute and make a new multiply, shift, or maybe just
919 have X (if C is 2 in the example above). But don't make
920 real multiply if we didn't have one before. */
922 if (! FLOAT_MODE_P (mode
))
924 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
925 rtx lhs
= op0
, rhs
= op1
;
928 if (GET_CODE (lhs
) == NEG
)
929 coeff0
= -1, lhs
= XEXP (lhs
, 0);
930 else if (GET_CODE (lhs
) == MULT
931 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
933 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
936 else if (GET_CODE (lhs
) == ASHIFT
937 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
938 && INTVAL (XEXP (lhs
, 1)) >= 0
939 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
941 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
945 if (GET_CODE (rhs
) == NEG
)
946 coeff1
= -1, rhs
= XEXP (rhs
, 0);
947 else if (GET_CODE (rhs
) == MULT
948 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
950 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
953 else if (GET_CODE (rhs
) == ASHIFT
954 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
955 && INTVAL (XEXP (rhs
, 1)) >= 0
956 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
958 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
962 if (rtx_equal_p (lhs
, rhs
))
964 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
965 GEN_INT (coeff0
+ coeff1
));
966 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
970 /* If one of the operands is a PLUS or a MINUS, see if we can
971 simplify this by the associative law.
972 Don't use the associative law for floating point.
973 The inaccuracy makes it nonassociative,
974 and subtle programs can break if operations are associated. */
976 if (INTEGRAL_MODE_P (mode
)
977 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
978 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
979 || (GET_CODE (op0
) == CONST
980 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
981 || (GET_CODE (op1
) == CONST
982 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
983 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
989 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
990 using cc0, in which case we want to leave it as a COMPARE
991 so we can distinguish it from a register-register-copy.
993 In IEEE floating point, x-0 is not the same as x. */
995 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
996 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
997 && trueop1
== CONST0_RTX (mode
))
1001 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1002 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1003 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1004 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1006 rtx xop00
= XEXP (op0
, 0);
1007 rtx xop10
= XEXP (op1
, 0);
1010 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1012 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1013 && GET_MODE (xop00
) == GET_MODE (xop10
)
1014 && REGNO (xop00
) == REGNO (xop10
)
1015 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1016 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1023 /* We can't assume x-x is 0 even with non-IEEE floating point,
1024 but since it is zero except in very strange circumstances, we
1025 will treat it as zero with -funsafe-math-optimizations. */
1026 if (rtx_equal_p (trueop0
, trueop1
)
1027 && ! side_effects_p (op0
)
1028 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1029 return CONST0_RTX (mode
);
1031 /* Change subtraction from zero into negation. (0 - x) is the
1032 same as -x when x is NaN, infinite, or finite and non-zero.
1033 But if the mode has signed zeros, and does not round towards
1034 -infinity, then 0 - 0 is 0, not -0. */
1035 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1036 return gen_rtx_NEG (mode
, op1
);
1038 /* (-1 - a) is ~a. */
1039 if (trueop0
== constm1_rtx
)
1040 return gen_rtx_NOT (mode
, op1
);
1042 /* Subtracting 0 has no effect unless the mode has signed zeros
1043 and supports rounding towards -infinity. In such a case,
1045 if (!(HONOR_SIGNED_ZEROS (mode
)
1046 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1047 && trueop1
== CONST0_RTX (mode
))
1050 /* See if this is something like X * C - X or vice versa or
1051 if the multiplication is written as a shift. If so, we can
1052 distribute and make a new multiply, shift, or maybe just
1053 have X (if C is 2 in the example above). But don't make
1054 real multiply if we didn't have one before. */
1056 if (! FLOAT_MODE_P (mode
))
1058 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1059 rtx lhs
= op0
, rhs
= op1
;
1062 if (GET_CODE (lhs
) == NEG
)
1063 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1064 else if (GET_CODE (lhs
) == MULT
1065 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1067 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1070 else if (GET_CODE (lhs
) == ASHIFT
1071 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1072 && INTVAL (XEXP (lhs
, 1)) >= 0
1073 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1075 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1076 lhs
= XEXP (lhs
, 0);
1079 if (GET_CODE (rhs
) == NEG
)
1080 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1081 else if (GET_CODE (rhs
) == MULT
1082 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1084 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1087 else if (GET_CODE (rhs
) == ASHIFT
1088 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1089 && INTVAL (XEXP (rhs
, 1)) >= 0
1090 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1092 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1093 rhs
= XEXP (rhs
, 0);
1096 if (rtx_equal_p (lhs
, rhs
))
1098 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1099 GEN_INT (coeff0
- coeff1
));
1100 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1104 /* (a - (-b)) -> (a + b). True even for IEEE. */
1105 if (GET_CODE (op1
) == NEG
)
1106 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1108 /* If one of the operands is a PLUS or a MINUS, see if we can
1109 simplify this by the associative law.
1110 Don't use the associative law for floating point.
1111 The inaccuracy makes it nonassociative,
1112 and subtle programs can break if operations are associated. */
1114 if (INTEGRAL_MODE_P (mode
)
1115 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1116 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1117 || (GET_CODE (op0
) == CONST
1118 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1119 || (GET_CODE (op1
) == CONST
1120 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1121 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1124 /* Don't let a relocatable value get a negative coeff. */
1125 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1126 return simplify_gen_binary (PLUS
, mode
,
1128 neg_const_int (mode
, op1
));
1130 /* (x - (x & y)) -> (x & ~y) */
1131 if (GET_CODE (op1
) == AND
)
1133 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1134 return simplify_gen_binary (AND
, mode
, op0
,
1135 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
1136 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1137 return simplify_gen_binary (AND
, mode
, op0
,
1138 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
1143 if (trueop1
== constm1_rtx
)
1145 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1147 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1150 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1151 x is NaN, since x * 0 is then also NaN. Nor is it valid
1152 when the mode has signed zeros, since multiplying a negative
1153 number by 0 will give -0, not 0. */
1154 if (!HONOR_NANS (mode
)
1155 && !HONOR_SIGNED_ZEROS (mode
)
1156 && trueop1
== CONST0_RTX (mode
)
1157 && ! side_effects_p (op0
))
1160 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1161 However, ANSI says we can drop signals,
1162 so we can do this anyway. */
1163 if (trueop1
== CONST1_RTX (mode
))
1166 /* Convert multiply by constant power of two into shift unless
1167 we are still generating RTL. This test is a kludge. */
1168 if (GET_CODE (trueop1
) == CONST_INT
1169 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1170 /* If the mode is larger than the host word size, and the
1171 uppermost bit is set, then this isn't a power of two due
1172 to implicit sign extension. */
1173 && (width
<= HOST_BITS_PER_WIDE_INT
1174 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1175 && ! rtx_equal_function_value_matters
)
1176 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1178 /* x*2 is x+x and x*(-1) is -x */
1179 if (GET_CODE (trueop1
) == CONST_DOUBLE
1180 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1181 && GET_MODE (op0
) == mode
)
1184 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1186 if (REAL_VALUES_EQUAL (d
, dconst2
))
1187 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1189 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1190 return gen_rtx_NEG (mode
, op0
);
1195 if (trueop1
== const0_rtx
)
1197 if (GET_CODE (trueop1
) == CONST_INT
1198 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1199 == GET_MODE_MASK (mode
)))
1201 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1203 /* A | (~A) -> -1 */
1204 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1205 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1206 && ! side_effects_p (op0
)
1207 && GET_MODE_CLASS (mode
) != MODE_CC
)
1212 if (trueop1
== const0_rtx
)
1214 if (GET_CODE (trueop1
) == CONST_INT
1215 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1216 == GET_MODE_MASK (mode
)))
1217 return gen_rtx_NOT (mode
, op0
);
1218 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1219 && GET_MODE_CLASS (mode
) != MODE_CC
)
1224 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1226 if (GET_CODE (trueop1
) == CONST_INT
1227 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1228 == GET_MODE_MASK (mode
)))
1230 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1231 && GET_MODE_CLASS (mode
) != MODE_CC
)
1234 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1235 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1236 && ! side_effects_p (op0
)
1237 && GET_MODE_CLASS (mode
) != MODE_CC
)
1242 /* Convert divide by power of two into shift (divide by 1 handled
1244 if (GET_CODE (trueop1
) == CONST_INT
1245 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1246 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1248 /* ... fall through ... */
1251 if (trueop1
== CONST1_RTX (mode
))
1253 /* On some platforms DIV uses narrower mode than its
1255 rtx x
= gen_lowpart_common (mode
, op0
);
1258 else if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
1259 return gen_lowpart_SUBREG (mode
, op0
);
1264 /* Maybe change 0 / x to 0. This transformation isn't safe for
1265 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1266 Nor is it safe for modes with signed zeros, since dividing
1267 0 by a negative number gives -0, not 0. */
1268 if (!HONOR_NANS (mode
)
1269 && !HONOR_SIGNED_ZEROS (mode
)
1270 && trueop0
== CONST0_RTX (mode
)
1271 && ! side_effects_p (op1
))
1274 /* Change division by a constant into multiplication. Only do
1275 this with -funsafe-math-optimizations. */
1276 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1277 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1278 && trueop1
!= CONST0_RTX (mode
)
1279 && flag_unsafe_math_optimizations
)
1282 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1284 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1286 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1287 return gen_rtx_MULT (mode
, op0
,
1288 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1294 /* Handle modulus by power of two (mod with 1 handled below). */
1295 if (GET_CODE (trueop1
) == CONST_INT
1296 && exact_log2 (INTVAL (trueop1
)) > 0)
1297 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1299 /* ... fall through ... */
1302 if ((trueop0
== const0_rtx
|| trueop1
== const1_rtx
)
1303 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1309 /* Rotating ~0 always results in ~0. */
1310 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1311 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1312 && ! side_effects_p (op1
))
1315 /* ... fall through ... */
1320 if (trueop1
== const0_rtx
)
1322 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
1327 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1
) == CONST_INT
1328 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1329 && ! side_effects_p (op0
))
1331 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1336 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1
) == CONST_INT
1337 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1338 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1339 && ! side_effects_p (op0
))
1341 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1346 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1348 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1353 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1355 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1363 /* ??? There are simplifications that can be done. */
1373 /* Get the integer argument values in two forms:
1374 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1376 arg0
= INTVAL (trueop0
);
1377 arg1
= INTVAL (trueop1
);
1379 if (width
< HOST_BITS_PER_WIDE_INT
)
1381 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1382 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1385 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1386 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1389 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1390 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1398 /* Compute the value of the arithmetic. */
1403 val
= arg0s
+ arg1s
;
1407 val
= arg0s
- arg1s
;
1411 val
= arg0s
* arg1s
;
1416 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1419 val
= arg0s
/ arg1s
;
1424 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1427 val
= arg0s
% arg1s
;
1432 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1435 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1440 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1443 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1459 /* If shift count is undefined, don't fold it; let the machine do
1460 what it wants. But truncate it if the machine will do that. */
1464 #ifdef SHIFT_COUNT_TRUNCATED
1465 if (SHIFT_COUNT_TRUNCATED
)
1469 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1476 #ifdef SHIFT_COUNT_TRUNCATED
1477 if (SHIFT_COUNT_TRUNCATED
)
1481 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1488 #ifdef SHIFT_COUNT_TRUNCATED
1489 if (SHIFT_COUNT_TRUNCATED
)
1493 val
= arg0s
>> arg1
;
1495 /* Bootstrap compiler may not have sign extended the right shift.
1496 Manually extend the sign to ensure bootstrap cc matches gcc. */
1497 if (arg0s
< 0 && arg1
> 0)
1498 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1507 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1508 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1516 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1517 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1521 /* Do nothing here. */
1525 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1529 val
= ((unsigned HOST_WIDE_INT
) arg0
1530 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1534 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1538 val
= ((unsigned HOST_WIDE_INT
) arg0
1539 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1546 val
= trunc_int_for_mode (val
, mode
);
1548 return GEN_INT (val
);
1551 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1554 Rather than test for specific case, we do this by a brute-force method
1555 and do all possible simplifications until no more changes occur. Then
1556 we rebuild the operation.
1558 If FORCE is true, then always generate the rtx. This is used to
1559 canonicalize stuff emitted from simplify_gen_binary. Note that this
1560 can still fail if the rtx is too complex. It won't fail just because
1561 the result is not 'simpler' than the input, however. */
1563 struct simplify_plus_minus_op_data
1570 simplify_plus_minus_op_data_cmp (p1
, p2
)
1574 const struct simplify_plus_minus_op_data
*d1
= p1
;
1575 const struct simplify_plus_minus_op_data
*d2
= p2
;
1577 return (commutative_operand_precedence (d2
->op
)
1578 - commutative_operand_precedence (d1
->op
));
1582 simplify_plus_minus (code
, mode
, op0
, op1
, force
)
1584 enum machine_mode mode
;
1588 struct simplify_plus_minus_op_data ops
[8];
1590 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
;
1591 int first
, negate
, changed
;
1594 memset ((char *) ops
, 0, sizeof ops
);
1596 /* Set up the two operands and then expand them until nothing has been
1597 changed. If we run out of room in our array, give up; this should
1598 almost never happen. */
1603 ops
[1].neg
= (code
== MINUS
);
1609 for (i
= 0; i
< n_ops
; i
++)
1611 rtx this_op
= ops
[i
].op
;
1612 int this_neg
= ops
[i
].neg
;
1613 enum rtx_code this_code
= GET_CODE (this_op
);
1622 ops
[n_ops
].op
= XEXP (this_op
, 1);
1623 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
1626 ops
[i
].op
= XEXP (this_op
, 0);
1632 ops
[i
].op
= XEXP (this_op
, 0);
1633 ops
[i
].neg
= ! this_neg
;
1639 && GET_CODE (XEXP (this_op
, 0)) == PLUS
1640 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
1641 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
1643 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
1644 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
1645 ops
[n_ops
].neg
= this_neg
;
1653 /* ~a -> (-a - 1) */
1656 ops
[n_ops
].op
= constm1_rtx
;
1657 ops
[n_ops
++].neg
= this_neg
;
1658 ops
[i
].op
= XEXP (this_op
, 0);
1659 ops
[i
].neg
= !this_neg
;
1667 ops
[i
].op
= neg_const_int (mode
, this_op
);
1680 /* If we only have two operands, we can't do anything. */
1681 if (n_ops
<= 2 && !force
)
1684 /* Count the number of CONSTs we didn't split above. */
1685 for (i
= 0; i
< n_ops
; i
++)
1686 if (GET_CODE (ops
[i
].op
) == CONST
)
1689 /* Now simplify each pair of operands until nothing changes. The first
1690 time through just simplify constants against each other. */
1697 for (i
= 0; i
< n_ops
- 1; i
++)
1698 for (j
= i
+ 1; j
< n_ops
; j
++)
1700 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
1701 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
1703 if (lhs
!= 0 && rhs
!= 0
1704 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
1706 enum rtx_code ncode
= PLUS
;
1712 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
1714 else if (swap_commutative_operands_p (lhs
, rhs
))
1715 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
1717 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
1719 /* Reject "simplifications" that just wrap the two
1720 arguments in a CONST. Failure to do so can result
1721 in infinite recursion with simplify_binary_operation
1722 when it calls us to simplify CONST operations. */
1724 && ! (GET_CODE (tem
) == CONST
1725 && GET_CODE (XEXP (tem
, 0)) == ncode
1726 && XEXP (XEXP (tem
, 0), 0) == lhs
1727 && XEXP (XEXP (tem
, 0), 1) == rhs
)
1728 /* Don't allow -x + -1 -> ~x simplifications in the
1729 first pass. This allows us the chance to combine
1730 the -1 with other constants. */
1732 && GET_CODE (tem
) == NOT
1733 && XEXP (tem
, 0) == rhs
))
1736 if (GET_CODE (tem
) == NEG
)
1737 tem
= XEXP (tem
, 0), lneg
= !lneg
;
1738 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
1739 tem
= neg_const_int (mode
, tem
), lneg
= 0;
1743 ops
[j
].op
= NULL_RTX
;
1753 /* Pack all the operands to the lower-numbered entries. */
1754 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1759 /* Sort the operations based on swap_commutative_operands_p. */
1760 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
1762 /* We suppressed creation of trivial CONST expressions in the
1763 combination loop to avoid recursion. Create one manually now.
1764 The combination loop should have ensured that there is exactly
1765 one CONST_INT, and the sort will have ensured that it is last
1766 in the array and that any other constant will be next-to-last. */
1769 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
1770 && CONSTANT_P (ops
[n_ops
- 2].op
))
1772 rtx value
= ops
[n_ops
- 1].op
;
1773 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
1774 value
= neg_const_int (mode
, value
);
1775 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
1779 /* Count the number of CONSTs that we generated. */
1781 for (i
= 0; i
< n_ops
; i
++)
1782 if (GET_CODE (ops
[i
].op
) == CONST
)
1785 /* Give up if we didn't reduce the number of operands we had. Make
1786 sure we count a CONST as two operands. If we have the same
1787 number of operands, but have made more CONSTs than before, this
1788 is also an improvement, so accept it. */
1790 && (n_ops
+ n_consts
> input_ops
1791 || (n_ops
+ n_consts
== input_ops
&& n_consts
<= input_consts
)))
1794 /* Put a non-negated operand first. If there aren't any, make all
1795 operands positive and negate the whole thing later. */
1798 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
1802 for (i
= 0; i
< n_ops
; i
++)
1814 /* Now make the result by performing the requested operations. */
1816 for (i
= 1; i
< n_ops
; i
++)
1817 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
1818 mode
, result
, ops
[i
].op
);
1820 return negate
? gen_rtx_NEG (mode
, result
) : result
;
1823 /* Like simplify_binary_operation except used for relational operators.
1824 MODE is the mode of the operands, not that of the result. If MODE
1825 is VOIDmode, both operands must also be VOIDmode and we compare the
1826 operands in "infinite precision".
1828 If no simplification is possible, this function returns zero. Otherwise,
1829 it returns either const_true_rtx or const0_rtx. */
1832 simplify_relational_operation (code
, mode
, op0
, op1
)
1834 enum machine_mode mode
;
1837 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
1842 if (mode
== VOIDmode
1843 && (GET_MODE (op0
) != VOIDmode
1844 || GET_MODE (op1
) != VOIDmode
))
1847 /* If op0 is a compare, extract the comparison arguments from it. */
1848 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
1849 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
1851 trueop0
= avoid_constant_pool_reference (op0
);
1852 trueop1
= avoid_constant_pool_reference (op1
);
1854 /* We can't simplify MODE_CC values since we don't know what the
1855 actual comparison is. */
1856 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
1863 /* Make sure the constant is second. */
1864 if (swap_commutative_operands_p (trueop0
, trueop1
))
1866 tem
= op0
, op0
= op1
, op1
= tem
;
1867 tem
= trueop0
, trueop0
= trueop1
, trueop1
= tem
;
1868 code
= swap_condition (code
);
1871 /* For integer comparisons of A and B maybe we can simplify A - B and can
1872 then simplify a comparison of that with zero. If A and B are both either
1873 a register or a CONST_INT, this can't help; testing for these cases will
1874 prevent infinite recursion here and speed things up.
1876 If CODE is an unsigned comparison, then we can never do this optimization,
1877 because it gives an incorrect result if the subtraction wraps around zero.
1878 ANSI C defines unsigned operations such that they never overflow, and
1879 thus such cases can not be ignored. */
1881 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
1882 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (trueop0
) == CONST_INT
)
1883 && (GET_CODE (op1
) == REG
|| GET_CODE (trueop1
) == CONST_INT
))
1884 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
1885 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
1886 return simplify_relational_operation (signed_condition (code
),
1887 mode
, tem
, const0_rtx
);
1889 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
1890 return const_true_rtx
;
1892 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
1895 /* For modes without NaNs, if the two operands are equal, we know the
1897 if (!HONOR_NANS (GET_MODE (trueop0
)) && rtx_equal_p (trueop0
, trueop1
))
1898 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
1900 /* If the operands are floating-point constants, see if we can fold
1902 else if (GET_CODE (trueop0
) == CONST_DOUBLE
1903 && GET_CODE (trueop1
) == CONST_DOUBLE
1904 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
1906 REAL_VALUE_TYPE d0
, d1
;
1908 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
1909 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
1911 /* Comparisons are unordered iff at least one of the values is NaN. */
1912 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
1922 return const_true_rtx
;
1935 equal
= REAL_VALUES_EQUAL (d0
, d1
);
1936 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
1937 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
1940 /* Otherwise, see if the operands are both integers. */
1941 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
1942 && (GET_CODE (trueop0
) == CONST_DOUBLE
1943 || GET_CODE (trueop0
) == CONST_INT
)
1944 && (GET_CODE (trueop1
) == CONST_DOUBLE
1945 || GET_CODE (trueop1
) == CONST_INT
))
1947 int width
= GET_MODE_BITSIZE (mode
);
1948 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
1949 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
1951 /* Get the two words comprising each integer constant. */
1952 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
1954 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
1955 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
1959 l0u
= l0s
= INTVAL (trueop0
);
1960 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
1963 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
1965 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
1966 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
1970 l1u
= l1s
= INTVAL (trueop1
);
1971 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
1974 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1975 we have to sign or zero-extend the values. */
1976 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
1978 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1979 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1981 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1982 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1984 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1985 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1987 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
1988 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
1990 equal
= (h0u
== h1u
&& l0u
== l1u
);
1991 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
1992 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
1993 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
1994 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
1997 /* Otherwise, there are some code-specific tests we can make. */
2003 /* References to the frame plus a constant or labels cannot
2004 be zero, but a SYMBOL_REF can due to #pragma weak. */
2005 if (((NONZERO_BASE_PLUS_P (op0
) && trueop1
== const0_rtx
)
2006 || GET_CODE (trueop0
) == LABEL_REF
)
2007 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2008 /* On some machines, the ap reg can be 0 sometimes. */
2009 && op0
!= arg_pointer_rtx
2016 if (((NONZERO_BASE_PLUS_P (op0
) && trueop1
== const0_rtx
)
2017 || GET_CODE (trueop0
) == LABEL_REF
)
2018 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2019 && op0
!= arg_pointer_rtx
2022 return const_true_rtx
;
2026 /* Unsigned values are never negative. */
2027 if (trueop1
== const0_rtx
)
2028 return const_true_rtx
;
2032 if (trueop1
== const0_rtx
)
2037 /* Unsigned values are never greater than the largest
2039 if (GET_CODE (trueop1
) == CONST_INT
2040 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2041 && INTEGRAL_MODE_P (mode
))
2042 return const_true_rtx
;
2046 if (GET_CODE (trueop1
) == CONST_INT
2047 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2048 && INTEGRAL_MODE_P (mode
))
2059 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2065 return equal
? const_true_rtx
: const0_rtx
;
2068 return ! equal
? const_true_rtx
: const0_rtx
;
2071 return op0lt
? const_true_rtx
: const0_rtx
;
2074 return op1lt
? const_true_rtx
: const0_rtx
;
2076 return op0ltu
? const_true_rtx
: const0_rtx
;
2078 return op1ltu
? const_true_rtx
: const0_rtx
;
2081 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
2084 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
2086 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
2088 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
2090 return const_true_rtx
;
2098 /* Simplify CODE, an operation with result mode MODE and three operands,
2099 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2100 a constant. Return 0 if no simplifications is possible. */
2103 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
2105 enum machine_mode mode
, op0_mode
;
2108 unsigned int width
= GET_MODE_BITSIZE (mode
);
2110 /* VOIDmode means "infinite" precision. */
2112 width
= HOST_BITS_PER_WIDE_INT
;
2118 if (GET_CODE (op0
) == CONST_INT
2119 && GET_CODE (op1
) == CONST_INT
2120 && GET_CODE (op2
) == CONST_INT
2121 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
2122 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
2124 /* Extracting a bit-field from a constant */
2125 HOST_WIDE_INT val
= INTVAL (op0
);
2127 if (BITS_BIG_ENDIAN
)
2128 val
>>= (GET_MODE_BITSIZE (op0_mode
)
2129 - INTVAL (op2
) - INTVAL (op1
));
2131 val
>>= INTVAL (op2
);
2133 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
2135 /* First zero-extend. */
2136 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
2137 /* If desired, propagate sign bit. */
2138 if (code
== SIGN_EXTRACT
2139 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
2140 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
2143 /* Clear the bits that don't belong in our mode,
2144 unless they and our sign bit are all one.
2145 So we get either a reasonable negative value or a reasonable
2146 unsigned value for this mode. */
2147 if (width
< HOST_BITS_PER_WIDE_INT
2148 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
2149 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
2150 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2152 return GEN_INT (val
);
2157 if (GET_CODE (op0
) == CONST_INT
)
2158 return op0
!= const0_rtx
? op1
: op2
;
2160 /* Convert a == b ? b : a to "a". */
2161 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
2162 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
2163 && rtx_equal_p (XEXP (op0
, 0), op1
)
2164 && rtx_equal_p (XEXP (op0
, 1), op2
))
2166 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
2167 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
2168 && rtx_equal_p (XEXP (op0
, 1), op1
)
2169 && rtx_equal_p (XEXP (op0
, 0), op2
))
2171 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
2173 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
2174 ? GET_MODE (XEXP (op0
, 1))
2175 : GET_MODE (XEXP (op0
, 0)));
2177 if (cmp_mode
== VOIDmode
)
2178 cmp_mode
= op0_mode
;
2179 temp
= simplify_relational_operation (GET_CODE (op0
), cmp_mode
,
2180 XEXP (op0
, 0), XEXP (op0
, 1));
2182 /* See if any simplifications were possible. */
2183 if (temp
== const0_rtx
)
2185 else if (temp
== const1_rtx
)
2190 /* Look for happy constants in op1 and op2. */
2191 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
2193 HOST_WIDE_INT t
= INTVAL (op1
);
2194 HOST_WIDE_INT f
= INTVAL (op2
);
2196 if (t
== STORE_FLAG_VALUE
&& f
== 0)
2197 code
= GET_CODE (op0
);
2198 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
2201 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
2209 return gen_rtx_fmt_ee (code
, mode
, XEXP (op0
, 0), XEXP (op0
, 1));
2221 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2222 Return 0 if no simplifications is possible. */
2224 simplify_subreg (outermode
, op
, innermode
, byte
)
2227 enum machine_mode outermode
, innermode
;
2229 /* Little bit of sanity checking. */
2230 if (innermode
== VOIDmode
|| outermode
== VOIDmode
2231 || innermode
== BLKmode
|| outermode
== BLKmode
)
2234 if (GET_MODE (op
) != innermode
2235 && GET_MODE (op
) != VOIDmode
)
2238 if (byte
% GET_MODE_SIZE (outermode
)
2239 || byte
>= GET_MODE_SIZE (innermode
))
2242 if (outermode
== innermode
&& !byte
)
2245 /* Attempt to simplify constant to non-SUBREG expression. */
2246 if (CONSTANT_P (op
))
2249 unsigned HOST_WIDE_INT val
= 0;
2251 /* ??? This code is partly redundant with code below, but can handle
2252 the subregs of floats and similar corner cases.
2253 Later we should move all simplification code here and rewrite
2254 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2255 using SIMPLIFY_SUBREG. */
2256 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
2258 rtx
new = gen_lowpart_if_possible (outermode
, op
);
2263 /* Similar comment as above apply here. */
2264 if (GET_MODE_SIZE (outermode
) == UNITS_PER_WORD
2265 && GET_MODE_SIZE (innermode
) > UNITS_PER_WORD
2266 && GET_MODE_CLASS (outermode
) == MODE_INT
)
2268 rtx
new = constant_subword (op
,
2269 (byte
/ UNITS_PER_WORD
),
2275 offset
= byte
* BITS_PER_UNIT
;
2276 switch (GET_CODE (op
))
2279 if (GET_MODE (op
) != VOIDmode
)
2282 /* We can't handle this case yet. */
2283 if (GET_MODE_BITSIZE (outermode
) >= HOST_BITS_PER_WIDE_INT
)
2286 part
= offset
>= HOST_BITS_PER_WIDE_INT
;
2287 if ((BITS_PER_WORD
> HOST_BITS_PER_WIDE_INT
2288 && BYTES_BIG_ENDIAN
)
2289 || (BITS_PER_WORD
<= HOST_BITS_PER_WIDE_INT
2290 && WORDS_BIG_ENDIAN
))
2292 val
= part
? CONST_DOUBLE_HIGH (op
) : CONST_DOUBLE_LOW (op
);
2293 offset
%= HOST_BITS_PER_WIDE_INT
;
2295 /* We've already picked the word we want from a double, so
2296 pretend this is actually an integer. */
2297 innermode
= mode_for_size (HOST_BITS_PER_WIDE_INT
, MODE_INT
, 0);
2301 if (GET_CODE (op
) == CONST_INT
)
2304 /* We don't handle synthesizing of non-integral constants yet. */
2305 if (GET_MODE_CLASS (outermode
) != MODE_INT
)
2308 if (BYTES_BIG_ENDIAN
|| WORDS_BIG_ENDIAN
)
2310 if (WORDS_BIG_ENDIAN
)
2311 offset
= (GET_MODE_BITSIZE (innermode
)
2312 - GET_MODE_BITSIZE (outermode
) - offset
);
2313 if (BYTES_BIG_ENDIAN
!= WORDS_BIG_ENDIAN
2314 && GET_MODE_SIZE (outermode
) < UNITS_PER_WORD
)
2315 offset
= (offset
+ BITS_PER_WORD
- GET_MODE_BITSIZE (outermode
)
2316 - 2 * (offset
% BITS_PER_WORD
));
2319 if (offset
>= HOST_BITS_PER_WIDE_INT
)
2320 return ((HOST_WIDE_INT
) val
< 0) ? constm1_rtx
: const0_rtx
;
2324 if (GET_MODE_BITSIZE (outermode
) < HOST_BITS_PER_WIDE_INT
)
2325 val
= trunc_int_for_mode (val
, outermode
);
2326 return GEN_INT (val
);
2333 /* Changing mode twice with SUBREG => just change it once,
2334 or not at all if changing back op starting mode. */
2335 if (GET_CODE (op
) == SUBREG
)
2337 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
2338 int final_offset
= byte
+ SUBREG_BYTE (op
);
2341 if (outermode
== innermostmode
2342 && byte
== 0 && SUBREG_BYTE (op
) == 0)
2343 return SUBREG_REG (op
);
2345 /* The SUBREG_BYTE represents offset, as if the value were stored
2346 in memory. Irritating exception is paradoxical subreg, where
2347 we define SUBREG_BYTE to be 0. On big endian machines, this
2348 value should be negative. For a moment, undo this exception. */
2349 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
2351 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
2352 if (WORDS_BIG_ENDIAN
)
2353 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
2354 if (BYTES_BIG_ENDIAN
)
2355 final_offset
+= difference
% UNITS_PER_WORD
;
2357 if (SUBREG_BYTE (op
) == 0
2358 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
2360 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
2361 if (WORDS_BIG_ENDIAN
)
2362 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
2363 if (BYTES_BIG_ENDIAN
)
2364 final_offset
+= difference
% UNITS_PER_WORD
;
2367 /* See whether resulting subreg will be paradoxical. */
2368 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
2370 /* In nonparadoxical subregs we can't handle negative offsets. */
2371 if (final_offset
< 0)
2373 /* Bail out in case resulting subreg would be incorrect. */
2374 if (final_offset
% GET_MODE_SIZE (outermode
)
2375 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
2381 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
2383 /* In paradoxical subreg, see if we are still looking on lower part.
2384 If so, our SUBREG_BYTE will be 0. */
2385 if (WORDS_BIG_ENDIAN
)
2386 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
2387 if (BYTES_BIG_ENDIAN
)
2388 offset
+= difference
% UNITS_PER_WORD
;
2389 if (offset
== final_offset
)
2395 /* Recurse for further possible simplifications. */
2396 new = simplify_subreg (outermode
, SUBREG_REG (op
),
2397 GET_MODE (SUBREG_REG (op
)),
2401 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
2404 /* SUBREG of a hard register => just change the register number
2405 and/or mode. If the hard register is not valid in that mode,
2406 suppress this simplification. If the hard register is the stack,
2407 frame, or argument pointer, leave this as a SUBREG. */
2410 && (! REG_FUNCTION_VALUE_P (op
)
2411 || ! rtx_equal_function_value_matters
)
2412 #ifdef CLASS_CANNOT_CHANGE_MODE
2413 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode
, innermode
)
2414 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
2415 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
2416 && (TEST_HARD_REG_BIT
2417 (reg_class_contents
[(int) CLASS_CANNOT_CHANGE_MODE
],
2420 && REGNO (op
) < FIRST_PSEUDO_REGISTER
2421 && ((reload_completed
&& !frame_pointer_needed
)
2422 || (REGNO (op
) != FRAME_POINTER_REGNUM
2423 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2424 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
2427 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2428 && REGNO (op
) != ARG_POINTER_REGNUM
2430 && REGNO (op
) != STACK_POINTER_REGNUM
)
2432 int final_regno
= subreg_hard_regno (gen_rtx_SUBREG (outermode
, op
, byte
),
2435 /* ??? We do allow it if the current REG is not valid for
2436 its mode. This is a kludge to work around how float/complex
2437 arguments are passed on 32-bit Sparc and should be fixed. */
2438 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
2439 || ! HARD_REGNO_MODE_OK (REGNO (op
), innermode
))
2441 rtx x
= gen_rtx_REG (outermode
, final_regno
);
2443 /* Propagate original regno. We don't have any way to specify
2444 the offset inside original regno, so do so only for lowpart.
2445 The information is used only by alias analysis that can not
2446 grok partial register anyway. */
2448 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
2449 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
2454 /* If we have a SUBREG of a register that we are replacing and we are
2455 replacing it with a MEM, make a new MEM and try replacing the
2456 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2457 or if we would be widening it. */
2459 if (GET_CODE (op
) == MEM
2460 && ! mode_dependent_address_p (XEXP (op
, 0))
2461 /* Allow splitting of volatile memory references in case we don't
2462 have instruction to move the whole thing. */
2463 && (! MEM_VOLATILE_P (op
)
2464 || ! have_insn_for (SET
, innermode
))
2465 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
2466 return adjust_address_nv (op
, outermode
, byte
);
2468 /* Handle complex values represented as CONCAT
2469 of real and imaginary part. */
2470 if (GET_CODE (op
) == CONCAT
)
2472 int is_realpart
= byte
< GET_MODE_UNIT_SIZE (innermode
);
2473 rtx part
= is_realpart
? XEXP (op
, 0) : XEXP (op
, 1);
2474 unsigned int final_offset
;
2477 final_offset
= byte
% (GET_MODE_UNIT_SIZE (innermode
));
2478 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
2481 /* We can at least simplify it by referring directly to the relevant part. */
2482 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
2487 /* Make a SUBREG operation or equivalent if it folds. */
2490 simplify_gen_subreg (outermode
, op
, innermode
, byte
)
2493 enum machine_mode outermode
, innermode
;
2496 /* Little bit of sanity checking. */
2497 if (innermode
== VOIDmode
|| outermode
== VOIDmode
2498 || innermode
== BLKmode
|| outermode
== BLKmode
)
2501 if (GET_MODE (op
) != innermode
2502 && GET_MODE (op
) != VOIDmode
)
2505 if (byte
% GET_MODE_SIZE (outermode
)
2506 || byte
>= GET_MODE_SIZE (innermode
))
2509 if (GET_CODE (op
) == QUEUED
)
2512 new = simplify_subreg (outermode
, op
, innermode
, byte
);
2516 if (GET_CODE (op
) == SUBREG
|| GET_MODE (op
) == VOIDmode
)
2519 return gen_rtx_SUBREG (outermode
, op
, byte
);
2521 /* Simplify X, an rtx expression.
2523 Return the simplified expression or NULL if no simplifications
2526 This is the preferred entry point into the simplification routines;
2527 however, we still allow passes to call the more specific routines.
2529 Right now GCC has three (yes, three) major bodies of RTL simplification
2530 code that need to be unified.
2532 1. fold_rtx in cse.c. This code uses various CSE specific
2533 information to aid in RTL simplification.
2535 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2536 it uses combine specific information to aid in RTL
2539 3. The routines in this file.
2542 Long term we want to only have one body of simplification code; to
2543 get to that state I recommend the following steps:
2545 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2546 which are not pass dependent state into these routines.
2548 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2549 use this routine whenever possible.
2551 3. Allow for pass dependent state to be provided to these
2552 routines and add simplifications based on the pass dependent
2553 state. Remove code from cse.c & combine.c that becomes
2556 It will take time, but ultimately the compiler will be easier to
2557 maintain and improve. It's totally silly that when we add a
2558 simplification that it needs to be added to 4 places (3 for RTL
2559 simplification and 1 for tree simplification. */
2565 enum rtx_code code
= GET_CODE (x
);
2566 enum machine_mode mode
= GET_MODE (x
);
2568 switch (GET_RTX_CLASS (code
))
2571 return simplify_unary_operation (code
, mode
,
2572 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
2574 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
2579 XEXP (x
, 0) = XEXP (x
, 1);
2581 return simplify_binary_operation (code
, mode
,
2582 XEXP (x
, 0), XEXP (x
, 1));
2586 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
2590 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
2591 XEXP (x
, 0), XEXP (x
, 1),
2595 return simplify_relational_operation (code
,
2596 ((GET_MODE (XEXP (x
, 0))
2598 ? GET_MODE (XEXP (x
, 0))
2599 : GET_MODE (XEXP (x
, 1))),
2600 XEXP (x
, 0), XEXP (x
, 1));
2602 /* The only case we try to handle is a SUBREG. */
2604 return simplify_gen_subreg (mode
, SUBREG_REG (x
),
2605 GET_MODE (SUBREG_REG (x
)),