/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
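/* For example, on a host with a 64-bit HOST_WIDE_INT the value -2 is held
   as the pair low = (unsigned HOST_WIDE_INT) -2, high = HWI_SIGN_EXTEND (-2),
   i.e. high == (HOST_WIDE_INT) -1, whereas any small nonnegative low gives
   high == 0.  */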
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
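/* For instance, in QImode negating (const_int -128) mathematically yields
   128, which gen_int_mode truncates back to (const_int -128); callers must
   not assume the result has the opposite sign.  */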
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
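/* Example (illustrative): a caller combining two addends writes

     x = simplify_gen_binary (PLUS, SImode, a, b);

   and relies on the code above to put a constant operand second, fold a
   constant-constant addition outright, and canonicalize nested PLUS/MINUS
   chains via simplify_plus_minus.  */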
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
        {
          code = new;
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
        }
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

        return
          simplify_gen_relational (code, mode,
                                   (op_mode != VOIDmode
                                    ? op_mode
                                    : GET_MODE (op0) != VOIDmode
                                    ? GET_MODE (op0)
                                    : GET_MODE (op1)),
                                   op0, op1);
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    case 'o':
      if (code == MEM)
        return replace_equiv_address_nv (x,
                                         simplify_replace_rtx (XEXP (x, 0),
                                                               old, new));
      else if (code == LO_SUM)
        {
          rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }

      return x;

    default:
      return x;
    }
  return x;
}
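/* Example (illustrative): replacing a register R by (const_int 3) in
   (plus:SI R (const_int 1)), i.e.

     simplify_replace_rtx (gen_rtx_PLUS (SImode, r, const1_rtx), r,
                           GEN_INT (3)),

   rebuilds the PLUS through simplify_gen_binary and therefore folds the
   whole expression to (const_int 4).  */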
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);
  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
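  /* For instance, (vec_duplicate:V4SI (const_int 7)) folds to a CONST_VECTOR
     holding four copies of (const_int 7); duplicating a CONST_VECTOR of a
     narrower vector mode repeats its elements cyclically.  */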
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
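  /* For example, (neg:V2SI (const_vector [1 2])) is folded element by
     element into (const_vector [-1 -2]) by the loop above.  */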
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;
533 /* When zero-extending a CONST_INT, we need to know its
535 if (op_mode
== VOIDmode
)
537 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
539 /* If we were really extending the mode,
540 we would have to distinguish between zero-extension
541 and sign-extension. */
542 if (width
!= GET_MODE_BITSIZE (op_mode
))
546 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
547 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
553 if (op_mode
== VOIDmode
)
555 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
557 /* If we were really extending the mode,
558 we would have to distinguish between zero-extension
559 and sign-extension. */
560 if (width
!= GET_MODE_BITSIZE (op_mode
))
564 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
567 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
569 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
570 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
587 val
= trunc_int_for_mode (val
, mode
);
589 return GEN_INT (val
);
592 /* We can do some operations on integer CONST_DOUBLEs. Also allow
593 for a DImode operation on a CONST_INT. */
594 else if (GET_MODE (trueop
) == VOIDmode
595 && width
<= HOST_BITS_PER_WIDE_INT
* 2
596 && (GET_CODE (trueop
) == CONST_DOUBLE
597 || GET_CODE (trueop
) == CONST_INT
))
599 unsigned HOST_WIDE_INT l1
, lv
;
600 HOST_WIDE_INT h1
, hv
;
602 if (GET_CODE (trueop
) == CONST_DOUBLE
)
603 l1
= CONST_DOUBLE_LOW (trueop
), h1
= CONST_DOUBLE_HIGH (trueop
);
605 l1
= INTVAL (trueop
), h1
= HWI_SIGN_EXTEND (l1
);
615 neg_double (l1
, h1
, &lv
, &hv
);
620 neg_double (l1
, h1
, &lv
, &hv
);
632 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
635 lv
= exact_log2 (l1
& -l1
) + 1;
641 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
643 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
644 - HOST_BITS_PER_WIDE_INT
;
652 lv
= GET_MODE_BITSIZE (mode
);
654 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
657 lv
= exact_log2 (l1
& -l1
);
680 /* This is just a change-of-mode, so do nothing. */
685 if (op_mode
== VOIDmode
)
688 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
692 lv
= l1
& GET_MODE_MASK (op_mode
);
696 if (op_mode
== VOIDmode
697 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
701 lv
= l1
& GET_MODE_MASK (op_mode
);
702 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
703 && (lv
& ((HOST_WIDE_INT
) 1
704 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
705 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
707 hv
= HWI_SIGN_EXTEND (lv
);
718 return immed_double_const (lv
, hv
, mode
);
721 else if (GET_CODE (trueop
) == CONST_DOUBLE
722 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
724 REAL_VALUE_TYPE d
, t
;
725 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
730 if (HONOR_SNANS (mode
) && real_isnan (&d
))
732 real_sqrt (&t
, mode
, &d
);
736 d
= REAL_VALUE_ABS (d
);
739 d
= REAL_VALUE_NEGATE (d
);
742 d
= real_value_truncate (mode
, d
);
745 /* All this does is change the mode. */
748 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
754 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
757 else if (GET_CODE (trueop
) == CONST_DOUBLE
758 && GET_MODE_CLASS (GET_MODE (trueop
)) == MODE_FLOAT
759 && GET_MODE_CLASS (mode
) == MODE_INT
760 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
764 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop
);
767 case FIX
: i
= REAL_VALUE_FIX (d
); break;
768 case UNSIGNED_FIX
: i
= REAL_VALUE_UNSIGNED_FIX (d
); break;
772 return gen_int_mode (i
, mode
);
775 /* This was formerly used only for non-IEEE float.
776 eggert@twinsun.com says it is safe for IEEE also. */
779 enum rtx_code reversed
;
780 /* There are some simplifications we can do even if the operands
785 /* (not (not X)) == X. */
786 if (GET_CODE (op
) == NOT
)
789 /* (not (eq X Y)) == (ne X Y), etc. */
790 if (mode
== BImode
&& GET_RTX_CLASS (GET_CODE (op
)) == '<'
791 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
))
793 return gen_rtx_fmt_ee (reversed
,
794 op_mode
, XEXP (op
, 0), XEXP (op
, 1));
798 /* (neg (neg X)) == X. */
799 if (GET_CODE (op
) == NEG
)
804 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
805 becomes just the MINUS if its mode is MODE. This allows
806 folding switch statements on machines using casesi (such as
808 if (GET_CODE (op
) == TRUNCATE
809 && GET_MODE (XEXP (op
, 0)) == mode
810 && GET_CODE (XEXP (op
, 0)) == MINUS
811 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
812 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
815 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
816 if (! POINTERS_EXTEND_UNSIGNED
817 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
819 || (GET_CODE (op
) == SUBREG
820 && GET_CODE (SUBREG_REG (op
)) == REG
821 && REG_POINTER (SUBREG_REG (op
))
822 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
823 return convert_memory_address (Pmode
, op
);
827 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
829 if (POINTERS_EXTEND_UNSIGNED
> 0
830 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
832 || (GET_CODE (op
) == SUBREG
833 && GET_CODE (SUBREG_REG (op
)) == REG
834 && REG_POINTER (SUBREG_REG (op
))
835 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
836 return convert_memory_address (Pmode
, op
);
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }
880 if (VECTOR_MODE_P (mode
)
881 && GET_CODE (trueop0
) == CONST_VECTOR
882 && GET_CODE (trueop1
) == CONST_VECTOR
)
884 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
885 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
886 enum machine_mode op0mode
= GET_MODE (trueop0
);
887 int op0_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op0mode
));
888 unsigned op0_n_elts
= (GET_MODE_SIZE (op0mode
) / op0_elt_size
);
889 enum machine_mode op1mode
= GET_MODE (trueop1
);
890 int op1_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (op1mode
));
891 unsigned op1_n_elts
= (GET_MODE_SIZE (op1mode
) / op1_elt_size
);
892 rtvec v
= rtvec_alloc (n_elts
);
895 if (op0_n_elts
!= n_elts
|| op1_n_elts
!= n_elts
)
898 for (i
= 0; i
< n_elts
; i
++)
900 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
901 CONST_VECTOR_ELT (trueop0
, i
),
902 CONST_VECTOR_ELT (trueop1
, i
));
905 RTVEC_ELT (v
, i
) = x
;
908 return gen_rtx_CONST_VECTOR (mode
, v
);
911 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
912 && GET_CODE (trueop0
) == CONST_DOUBLE
913 && GET_CODE (trueop1
) == CONST_DOUBLE
914 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
916 REAL_VALUE_TYPE f0
, f1
, value
;
918 REAL_VALUE_FROM_CONST_DOUBLE (f0
, trueop0
);
919 REAL_VALUE_FROM_CONST_DOUBLE (f1
, trueop1
);
920 f0
= real_value_truncate (mode
, f0
);
921 f1
= real_value_truncate (mode
, f1
);
924 && !MODE_HAS_INFINITIES (mode
)
925 && REAL_VALUES_EQUAL (f1
, dconst0
))
928 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
930 value
= real_value_truncate (mode
, value
);
931 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
934 /* We can fold some multi-word operations. */
935 if (GET_MODE_CLASS (mode
) == MODE_INT
936 && width
== HOST_BITS_PER_WIDE_INT
* 2
937 && (GET_CODE (trueop0
) == CONST_DOUBLE
938 || GET_CODE (trueop0
) == CONST_INT
)
939 && (GET_CODE (trueop1
) == CONST_DOUBLE
940 || GET_CODE (trueop1
) == CONST_INT
))
942 unsigned HOST_WIDE_INT l1
, l2
, lv
;
943 HOST_WIDE_INT h1
, h2
, hv
;
945 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
946 l1
= CONST_DOUBLE_LOW (trueop0
), h1
= CONST_DOUBLE_HIGH (trueop0
);
948 l1
= INTVAL (trueop0
), h1
= HWI_SIGN_EXTEND (l1
);
950 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
951 l2
= CONST_DOUBLE_LOW (trueop1
), h2
= CONST_DOUBLE_HIGH (trueop1
);
953 l2
= INTVAL (trueop1
), h2
= HWI_SIGN_EXTEND (l2
);
958 /* A - B == A + (-B). */
959 neg_double (l2
, h2
, &lv
, &hv
);
962 /* .. fall through ... */
965 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
969 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
972 case DIV
: case MOD
: case UDIV
: case UMOD
:
973 /* We'd need to include tree.h to do this and it doesn't seem worth
978 lv
= l1
& l2
, hv
= h1
& h2
;
982 lv
= l1
| l2
, hv
= h1
| h2
;
986 lv
= l1
^ l2
, hv
= h1
^ h2
;
992 && ((unsigned HOST_WIDE_INT
) l1
993 < (unsigned HOST_WIDE_INT
) l2
)))
1002 && ((unsigned HOST_WIDE_INT
) l1
1003 > (unsigned HOST_WIDE_INT
) l2
)))
1010 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
1012 && ((unsigned HOST_WIDE_INT
) l1
1013 < (unsigned HOST_WIDE_INT
) l2
)))
1020 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
1022 && ((unsigned HOST_WIDE_INT
) l1
1023 > (unsigned HOST_WIDE_INT
) l2
)))
1029 case LSHIFTRT
: case ASHIFTRT
:
1031 case ROTATE
: case ROTATERT
:
1032 #ifdef SHIFT_COUNT_TRUNCATED
1033 if (SHIFT_COUNT_TRUNCATED
)
1034 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
1037 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
1040 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
1041 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
1043 else if (code
== ASHIFT
)
1044 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
1045 else if (code
== ROTATE
)
1046 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1047 else /* code == ROTATERT */
1048 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
1055 return immed_double_const (lv
, hv
, mode
);
1058 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
1059 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
1061 /* Even if we can't compute a constant result,
1062 there are some cases worth simplifying. */
1067 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1068 when x is NaN, infinite, or finite and nonzero. They aren't
1069 when x is -0 and the rounding mode is not towards -infinity,
1070 since (-0) + 0 is then 0. */
1071 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1074 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1075 transformations are safe even for IEEE. */
1076 if (GET_CODE (op0
) == NEG
)
1077 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1078 else if (GET_CODE (op1
) == NEG
)
1079 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1081 /* (~a) + 1 -> -a */
1082 if (INTEGRAL_MODE_P (mode
)
1083 && GET_CODE (op0
) == NOT
1084 && trueop1
== const1_rtx
)
1085 return gen_rtx_NEG (mode
, XEXP (op0
, 0));
1087 /* Handle both-operands-constant cases. We can only add
1088 CONST_INTs to constants since the sum of relocatable symbols
1089 can't be handled by most assemblers. Don't add CONST_INT
1090 to CONST_INT since overflow won't be computed properly if wider
1091 than HOST_BITS_PER_WIDE_INT. */
1093 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1094 && GET_CODE (op1
) == CONST_INT
)
1095 return plus_constant (op0
, INTVAL (op1
));
1096 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1097 && GET_CODE (op0
) == CONST_INT
)
1098 return plus_constant (op1
, INTVAL (op0
));
1100 /* See if this is something like X * C - X or vice versa or
1101 if the multiplication is written as a shift. If so, we can
1102 distribute and make a new multiply, shift, or maybe just
1103 have X (if C is 2 in the example above). But don't make
1104 real multiply if we didn't have one before. */
1106 if (! FLOAT_MODE_P (mode
))
1108 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1109 rtx lhs
= op0
, rhs
= op1
;
1112 if (GET_CODE (lhs
) == NEG
)
1113 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1114 else if (GET_CODE (lhs
) == MULT
1115 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1117 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1120 else if (GET_CODE (lhs
) == ASHIFT
1121 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1122 && INTVAL (XEXP (lhs
, 1)) >= 0
1123 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1125 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1126 lhs
= XEXP (lhs
, 0);
1129 if (GET_CODE (rhs
) == NEG
)
1130 coeff1
= -1, rhs
= XEXP (rhs
, 0);
1131 else if (GET_CODE (rhs
) == MULT
1132 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1134 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1137 else if (GET_CODE (rhs
) == ASHIFT
1138 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1139 && INTVAL (XEXP (rhs
, 1)) >= 0
1140 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1142 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1143 rhs
= XEXP (rhs
, 0);
1146 if (rtx_equal_p (lhs
, rhs
))
1148 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1149 GEN_INT (coeff0
+ coeff1
));
1150 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
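          /* For example, (plus (mult x 4) (neg x)) simplifies to (mult x 3);
             a combination such as (plus (ashift x 2) x) is rejected, though,
             because it would create a MULT where none existed before.  */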
1154 /* If one of the operands is a PLUS or a MINUS, see if we can
1155 simplify this by the associative law.
1156 Don't use the associative law for floating point.
1157 The inaccuracy makes it nonassociative,
1158 and subtle programs can break if operations are associated. */
1160 if (INTEGRAL_MODE_P (mode
)
1161 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1162 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1163 || (GET_CODE (op0
) == CONST
1164 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1165 || (GET_CODE (op1
) == CONST
1166 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1167 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1173 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1174 using cc0, in which case we want to leave it as a COMPARE
1175 so we can distinguish it from a register-register-copy.
1177 In IEEE floating point, x-0 is not the same as x. */
1179 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1180 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1181 && trueop1
== CONST0_RTX (mode
))
1185 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1186 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1187 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1188 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1190 rtx xop00
= XEXP (op0
, 0);
1191 rtx xop10
= XEXP (op1
, 0);
1194 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1196 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
1197 && GET_MODE (xop00
) == GET_MODE (xop10
)
1198 && REGNO (xop00
) == REGNO (xop10
)
1199 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1200 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1207 /* We can't assume x-x is 0 even with non-IEEE floating point,
1208 but since it is zero except in very strange circumstances, we
1209 will treat it as zero with -funsafe-math-optimizations. */
1210 if (rtx_equal_p (trueop0
, trueop1
)
1211 && ! side_effects_p (op0
)
1212 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1213 return CONST0_RTX (mode
);
1215 /* Change subtraction from zero into negation. (0 - x) is the
1216 same as -x when x is NaN, infinite, or finite and nonzero.
1217 But if the mode has signed zeros, and does not round towards
1218 -infinity, then 0 - 0 is 0, not -0. */
1219 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1220 return gen_rtx_NEG (mode
, op1
);
1222 /* (-1 - a) is ~a. */
1223 if (trueop0
== constm1_rtx
)
1224 return gen_rtx_NOT (mode
, op1
);
1226 /* Subtracting 0 has no effect unless the mode has signed zeros
1227 and supports rounding towards -infinity. In such a case,
1229 if (!(HONOR_SIGNED_ZEROS (mode
)
1230 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1231 && trueop1
== CONST0_RTX (mode
))
1234 /* See if this is something like X * C - X or vice versa or
1235 if the multiplication is written as a shift. If so, we can
1236 distribute and make a new multiply, shift, or maybe just
1237 have X (if C is 2 in the example above). But don't make
1238 real multiply if we didn't have one before. */
1240 if (! FLOAT_MODE_P (mode
))
1242 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1243 rtx lhs
= op0
, rhs
= op1
;
1246 if (GET_CODE (lhs
) == NEG
)
1247 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1248 else if (GET_CODE (lhs
) == MULT
1249 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1251 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1254 else if (GET_CODE (lhs
) == ASHIFT
1255 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1256 && INTVAL (XEXP (lhs
, 1)) >= 0
1257 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1259 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1260 lhs
= XEXP (lhs
, 0);
1263 if (GET_CODE (rhs
) == NEG
)
1264 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1265 else if (GET_CODE (rhs
) == MULT
1266 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1268 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1271 else if (GET_CODE (rhs
) == ASHIFT
1272 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1273 && INTVAL (XEXP (rhs
, 1)) >= 0
1274 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1276 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1277 rhs
= XEXP (rhs
, 0);
1280 if (rtx_equal_p (lhs
, rhs
))
1282 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1283 GEN_INT (coeff0
- coeff1
));
1284 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1288 /* (a - (-b)) -> (a + b). True even for IEEE. */
1289 if (GET_CODE (op1
) == NEG
)
1290 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1292 /* If one of the operands is a PLUS or a MINUS, see if we can
1293 simplify this by the associative law.
1294 Don't use the associative law for floating point.
1295 The inaccuracy makes it nonassociative,
1296 and subtle programs can break if operations are associated. */
1298 if (INTEGRAL_MODE_P (mode
)
1299 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1300 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
1301 || (GET_CODE (op0
) == CONST
1302 && GET_CODE (XEXP (op0
, 0)) == PLUS
)
1303 || (GET_CODE (op1
) == CONST
1304 && GET_CODE (XEXP (op1
, 0)) == PLUS
))
1305 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1308 /* Don't let a relocatable value get a negative coeff. */
1309 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1310 return simplify_gen_binary (PLUS
, mode
,
1312 neg_const_int (mode
, op1
));
          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;

        case MULT:
1333 if (trueop1
== constm1_rtx
)
1335 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1337 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1340 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1341 x is NaN, since x * 0 is then also NaN. Nor is it valid
1342 when the mode has signed zeros, since multiplying a negative
1343 number by 0 will give -0, not 0. */
1344 if (!HONOR_NANS (mode
)
1345 && !HONOR_SIGNED_ZEROS (mode
)
1346 && trueop1
== CONST0_RTX (mode
)
1347 && ! side_effects_p (op0
))
1350 /* In IEEE floating point, x*1 is not equivalent to x for
1352 if (!HONOR_SNANS (mode
)
1353 && trueop1
== CONST1_RTX (mode
))
1356 /* Convert multiply by constant power of two into shift unless
1357 we are still generating RTL. This test is a kludge. */
1358 if (GET_CODE (trueop1
) == CONST_INT
1359 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1360 /* If the mode is larger than the host word size, and the
1361 uppermost bit is set, then this isn't a power of two due
1362 to implicit sign extension. */
1363 && (width
<= HOST_BITS_PER_WIDE_INT
1364 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1365 && ! rtx_equal_function_value_matters
)
1366 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1368 /* x*2 is x+x and x*(-1) is -x */
1369 if (GET_CODE (trueop1
) == CONST_DOUBLE
1370 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1371 && GET_MODE (op0
) == mode
)
1374 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1376 if (REAL_VALUES_EQUAL (d
, dconst2
))
1377 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1379 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1380 return gen_rtx_NEG (mode
, op0
);
1385 if (trueop1
== const0_rtx
)
1387 if (GET_CODE (trueop1
) == CONST_INT
1388 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1389 == GET_MODE_MASK (mode
)))
1391 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1393 /* A | (~A) -> -1 */
1394 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1395 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1396 && ! side_effects_p (op0
)
1397 && GET_MODE_CLASS (mode
) != MODE_CC
)
1402 if (trueop1
== const0_rtx
)
1404 if (GET_CODE (trueop1
) == CONST_INT
1405 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1406 == GET_MODE_MASK (mode
)))
1407 return gen_rtx_NOT (mode
, op0
);
1408 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1409 && GET_MODE_CLASS (mode
) != MODE_CC
)
1414 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1416 if (GET_CODE (trueop1
) == CONST_INT
1417 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1418 == GET_MODE_MASK (mode
)))
1420 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1421 && GET_MODE_CLASS (mode
) != MODE_CC
)
1424 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1425 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1426 && ! side_effects_p (op0
)
1427 && GET_MODE_CLASS (mode
) != MODE_CC
)
1432 /* Convert divide by power of two into shift (divide by 1 handled
1434 if (GET_CODE (trueop1
) == CONST_INT
1435 && (arg1
= exact_log2 (INTVAL (trueop1
))) > 0)
1436 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1438 /* ... fall through ... */
1441 if (trueop1
== CONST1_RTX (mode
))
1443 /* On some platforms DIV uses narrower mode than its
1445 rtx x
= gen_lowpart_common (mode
, op0
);
1448 else if (mode
!= GET_MODE (op0
) && GET_MODE (op0
) != VOIDmode
)
1449 return gen_lowpart_SUBREG (mode
, op0
);
1454 /* Maybe change 0 / x to 0. This transformation isn't safe for
1455 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1456 Nor is it safe for modes with signed zeros, since dividing
1457 0 by a negative number gives -0, not 0. */
1458 if (!HONOR_NANS (mode
)
1459 && !HONOR_SIGNED_ZEROS (mode
)
1460 && trueop0
== CONST0_RTX (mode
)
1461 && ! side_effects_p (op1
))
1464 /* Change division by a constant into multiplication. Only do
1465 this with -funsafe-math-optimizations. */
1466 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1467 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1468 && trueop1
!= CONST0_RTX (mode
)
1469 && flag_unsafe_math_optimizations
)
1472 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1474 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1476 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1477 return gen_rtx_MULT (mode
, op0
,
1478 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1484 /* Handle modulus by power of two (mod with 1 handled below). */
1485 if (GET_CODE (trueop1
) == CONST_INT
1486 && exact_log2 (INTVAL (trueop1
)) > 0)
1487 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1489 /* ... fall through ... */
1492 if ((trueop0
== const0_rtx
|| trueop1
== const1_rtx
)
1493 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1500 /* Rotating ~0 always results in ~0. */
1501 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1502 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1503 && ! side_effects_p (op1
))
1506 /* ... fall through ... */
1510 if (trueop1
== const0_rtx
)
1512 if (trueop0
== const0_rtx
&& ! side_effects_p (op1
))
1517 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1
) == CONST_INT
1518 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1519 && ! side_effects_p (op0
))
1521 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1526 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (trueop1
) == CONST_INT
1527 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1528 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1529 && ! side_effects_p (op0
))
1531 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1536 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1538 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1543 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1545 else if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1553 /* ??? There are simplifications that can be done. */
1557 if (!VECTOR_MODE_P (mode
))
1559 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
1561 != GET_MODE_INNER (GET_MODE (trueop0
)))
1562 || GET_CODE (trueop1
) != PARALLEL
1563 || XVECLEN (trueop1
, 0) != 1
1564 || GET_CODE (XVECEXP (trueop1
, 0, 0)) != CONST_INT
)
1567 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1568 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP (trueop1
, 0, 0)));
1572 if (!VECTOR_MODE_P (GET_MODE (trueop0
))
1573 || (GET_MODE_INNER (mode
)
1574 != GET_MODE_INNER (GET_MODE (trueop0
)))
1575 || GET_CODE (trueop1
) != PARALLEL
)
1578 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1580 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1581 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1582 rtvec v
= rtvec_alloc (n_elts
);
1585 if (XVECLEN (trueop1
, 0) != (int) n_elts
)
1587 for (i
= 0; i
< n_elts
; i
++)
1589 rtx x
= XVECEXP (trueop1
, 0, i
);
1591 if (GET_CODE (x
) != CONST_INT
)
1593 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, INTVAL (x
));
1596 return gen_rtx_CONST_VECTOR (mode
, v
);
1602 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
1603 ? GET_MODE (trueop0
)
1604 : GET_MODE_INNER (mode
));
1605 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
1606 ? GET_MODE (trueop1
)
1607 : GET_MODE_INNER (mode
));
1609 if (!VECTOR_MODE_P (mode
)
1610 || (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
1611 != GET_MODE_SIZE (mode
)))
1614 if ((VECTOR_MODE_P (op0_mode
)
1615 && (GET_MODE_INNER (mode
)
1616 != GET_MODE_INNER (op0_mode
)))
1617 || (!VECTOR_MODE_P (op0_mode
)
1618 && GET_MODE_INNER (mode
) != op0_mode
))
1621 if ((VECTOR_MODE_P (op1_mode
)
1622 && (GET_MODE_INNER (mode
)
1623 != GET_MODE_INNER (op1_mode
)))
1624 || (!VECTOR_MODE_P (op1_mode
)
1625 && GET_MODE_INNER (mode
) != op1_mode
))
1628 if ((GET_CODE (trueop0
) == CONST_VECTOR
1629 || GET_CODE (trueop0
) == CONST_INT
1630 || GET_CODE (trueop0
) == CONST_DOUBLE
)
1631 && (GET_CODE (trueop1
) == CONST_VECTOR
1632 || GET_CODE (trueop1
) == CONST_INT
1633 || GET_CODE (trueop1
) == CONST_DOUBLE
))
1635 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1636 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1637 rtvec v
= rtvec_alloc (n_elts
);
1639 unsigned in_n_elts
= 1;
1641 if (VECTOR_MODE_P (op0_mode
))
1642 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
1643 for (i
= 0; i
< n_elts
; i
++)
1647 if (!VECTOR_MODE_P (op0_mode
))
1648 RTVEC_ELT (v
, i
) = trueop0
;
1650 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
1654 if (!VECTOR_MODE_P (op1_mode
))
1655 RTVEC_ELT (v
, i
) = trueop1
;
1657 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
1662 return gen_rtx_CONST_VECTOR (mode
, v
);
1674 /* Get the integer argument values in two forms:
1675 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1677 arg0
= INTVAL (trueop0
);
1678 arg1
= INTVAL (trueop1
);
1680 if (width
< HOST_BITS_PER_WIDE_INT
)
1682 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1683 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1686 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1687 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1690 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1691 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1699 /* Compute the value of the arithmetic. */
1704 val
= arg0s
+ arg1s
;
1708 val
= arg0s
- arg1s
;
1712 val
= arg0s
* arg1s
;
1717 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1720 val
= arg0s
/ arg1s
;
1725 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1728 val
= arg0s
% arg1s
;
1733 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1736 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1741 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
1744 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1760 /* If shift count is undefined, don't fold it; let the machine do
1761 what it wants. But truncate it if the machine will do that. */
1765 #ifdef SHIFT_COUNT_TRUNCATED
1766 if (SHIFT_COUNT_TRUNCATED
)
1770 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1777 #ifdef SHIFT_COUNT_TRUNCATED
1778 if (SHIFT_COUNT_TRUNCATED
)
1782 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1789 #ifdef SHIFT_COUNT_TRUNCATED
1790 if (SHIFT_COUNT_TRUNCATED
)
1794 val
= arg0s
>> arg1
;
1796 /* Bootstrap compiler may not have sign extended the right shift.
1797 Manually extend the sign to insure bootstrap cc matches gcc. */
1798 if (arg0s
< 0 && arg1
> 0)
1799 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1808 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1809 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1817 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1818 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1822 /* Do nothing here. */
1826 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1830 val
= ((unsigned HOST_WIDE_INT
) arg0
1831 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1835 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1839 val
= ((unsigned HOST_WIDE_INT
) arg0
1840 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1847 /* ??? There are simplifications that can be done. */
1854 val
= trunc_int_for_mode (val
, mode
);
1856 return GEN_INT (val
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
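/* qsort with this comparison function (see below) places operands of higher
   commutative-operand precedence first, so constant terms end up at the end
   of the ops array.  */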
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
1912 for (i
= 0; i
< n_ops
; i
++)
1914 rtx this_op
= ops
[i
].op
;
1915 int this_neg
= ops
[i
].neg
;
1916 enum rtx_code this_code
= GET_CODE (this_op
);
1925 ops
[n_ops
].op
= XEXP (this_op
, 1);
1926 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
1929 ops
[i
].op
= XEXP (this_op
, 0);
1935 ops
[i
].op
= XEXP (this_op
, 0);
1936 ops
[i
].neg
= ! this_neg
;
1942 && GET_CODE (XEXP (this_op
, 0)) == PLUS
1943 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
1944 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
1946 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
1947 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
1948 ops
[n_ops
].neg
= this_neg
;
1956 /* ~a -> (-a - 1) */
1959 ops
[n_ops
].op
= constm1_rtx
;
1960 ops
[n_ops
++].neg
= this_neg
;
1961 ops
[i
].op
= XEXP (this_op
, 0);
1962 ops
[i
].neg
= !this_neg
;
1970 ops
[i
].op
= neg_const_int (mode
, this_op
);
1983 /* If we only have two operands, we can't do anything. */
1984 if (n_ops
<= 2 && !force
)
1987 /* Count the number of CONSTs we didn't split above. */
1988 for (i
= 0; i
< n_ops
; i
++)
1989 if (GET_CODE (ops
[i
].op
) == CONST
)
1992 /* Now simplify each pair of operands until nothing changes. The first
1993 time through just simplify constants against each other. */
2000 for (i
= 0; i
< n_ops
- 1; i
++)
2001 for (j
= i
+ 1; j
< n_ops
; j
++)
2003 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
2004 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
2006 if (lhs
!= 0 && rhs
!= 0
2007 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
2009 enum rtx_code ncode
= PLUS
;
2015 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2017 else if (swap_commutative_operands_p (lhs
, rhs
))
2018 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2020 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
2022 /* Reject "simplifications" that just wrap the two
2023 arguments in a CONST. Failure to do so can result
2024 in infinite recursion with simplify_binary_operation
2025 when it calls us to simplify CONST operations. */
2027 && ! (GET_CODE (tem
) == CONST
2028 && GET_CODE (XEXP (tem
, 0)) == ncode
2029 && XEXP (XEXP (tem
, 0), 0) == lhs
2030 && XEXP (XEXP (tem
, 0), 1) == rhs
)
2031 /* Don't allow -x + -1 -> ~x simplifications in the
2032 first pass. This allows us the chance to combine
2033 the -1 with other constants. */
2035 && GET_CODE (tem
) == NOT
2036 && XEXP (tem
, 0) == rhs
))
2039 if (GET_CODE (tem
) == NEG
)
2040 tem
= XEXP (tem
, 0), lneg
= !lneg
;
2041 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
2042 tem
= neg_const_int (mode
, tem
), lneg
= 0;
2046 ops
[j
].op
= NULL_RTX
;
2056 /* Pack all the operands to the lower-numbered entries. */
2057 for (i
= 0, j
= 0; j
< n_ops
; j
++)
2062 /* Sort the operations based on swap_commutative_operands_p. */
2063 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
2065 /* We suppressed creation of trivial CONST expressions in the
2066 combination loop to avoid recursion. Create one manually now.
2067 The combination loop should have ensured that there is exactly
2068 one CONST_INT, and the sort will have ensured that it is last
2069 in the array and that any other constant will be next-to-last. */
2072 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
2073 && CONSTANT_P (ops
[n_ops
- 2].op
))
2075 rtx value
= ops
[n_ops
- 1].op
;
2076 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
2077 value
= neg_const_int (mode
, value
);
2078 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
2082 /* Count the number of CONSTs that we generated. */
2084 for (i
= 0; i
< n_ops
; i
++)
2085 if (GET_CODE (ops
[i
].op
) == CONST
)
2088 /* Give up if we didn't reduce the number of operands we had. Make
2089 sure we count a CONST as two operands. If we have the same
2090 number of operands, but have made more CONSTs than before, this
2091 is also an improvement, so accept it. */
2093 && (n_ops
+ n_consts
> input_ops
2094 || (n_ops
+ n_consts
== input_ops
&& n_consts
<= input_consts
)))
2097 /* Put a non-negated operand first. If there aren't any, make all
2098 operands positive and negate the whole thing later. */
2101 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
2105 for (i
= 0; i
< n_ops
; i
++)
2117 /* Now make the result by performing the requested operations. */
2119 for (i
= 1; i
< n_ops
; i
++)
2120 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
2121 mode
, result
, ops
[i
].op
);
2123 return negate
? gen_rtx_NEG (mode
, result
) : result
;
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }
2168 /* For integer comparisons of A and B maybe we can simplify A - B and can
2169 then simplify a comparison of that with zero. If A and B are both either
2170 a register or a CONST_INT, this can't help; testing for these cases will
2171 prevent infinite recursion here and speed things up.
2173 If CODE is an unsigned comparison, then we can never do this optimization,
2174 because it gives an incorrect result if the subtraction wraps around zero.
2175 ANSI C defines unsigned operations such that they never overflow, and
2176 thus such cases can not be ignored. */
2178 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
2179 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (trueop0
) == CONST_INT
)
2180 && (GET_CODE (op1
) == REG
|| GET_CODE (trueop1
) == CONST_INT
))
2181 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
2182 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
2183 return simplify_relational_operation (signed_condition (code
),
2184 mode
, tem
, const0_rtx
);
2186 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
2187 return const_true_rtx
;
2189 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
2192 /* For modes without NaNs, if the two operands are equal, we know the
2193 result except if they have side-effects. */
2194 if (! HONOR_NANS (GET_MODE (trueop0
))
2195 && rtx_equal_p (trueop0
, trueop1
)
2196 && ! side_effects_p (trueop0
))
2197 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
2199 /* If the operands are floating-point constants, see if we can fold
2201 else if (GET_CODE (trueop0
) == CONST_DOUBLE
2202 && GET_CODE (trueop1
) == CONST_DOUBLE
2203 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
2205 REAL_VALUE_TYPE d0
, d1
;
2207 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
2208 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
2210 /* Comparisons are unordered iff at least one of the values is NaN. */
2211 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
2221 return const_true_rtx
;
2234 equal
= REAL_VALUES_EQUAL (d0
, d1
);
2235 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
2236 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
2239 /* Otherwise, see if the operands are both integers. */
2240 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
2241 && (GET_CODE (trueop0
) == CONST_DOUBLE
2242 || GET_CODE (trueop0
) == CONST_INT
)
2243 && (GET_CODE (trueop1
) == CONST_DOUBLE
2244 || GET_CODE (trueop1
) == CONST_INT
))
2246 int width
= GET_MODE_BITSIZE (mode
);
2247 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
2248 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
2250 /* Get the two words comprising each integer constant. */
2251 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
2253 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
2254 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
2258 l0u
= l0s
= INTVAL (trueop0
);
2259 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
2262 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
2264 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
2265 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
2269 l1u
= l1s
= INTVAL (trueop1
);
2270 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
2273 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2274 we have to sign or zero-extend the values. */
2275 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
2277 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2278 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
2280 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2281 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
2283 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
2284 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
2286 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
2287 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
2289 equal
= (h0u
== h1u
&& l0u
== l1u
);
2290 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
2291 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
2292 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
2293 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
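      /* Concretely: comparing (const_int -1) with (const_int 1) in QImode
         sets op0lt (signed -1 < 1) but leaves op0ltu clear, because the
         zero-extended low word 0xff is greater than 1 when viewed as
         unsigned.  */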
2296 /* Otherwise, there are some code-specific tests we can make. */
2302 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
2307 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
2308 return const_true_rtx
;
2312 /* Unsigned values are never negative. */
2313 if (trueop1
== const0_rtx
)
2314 return const_true_rtx
;
2318 if (trueop1
== const0_rtx
)
2323 /* Unsigned values are never greater than the largest
2325 if (GET_CODE (trueop1
) == CONST_INT
2326 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2327 && INTEGRAL_MODE_P (mode
))
2328 return const_true_rtx
;
2332 if (GET_CODE (trueop1
) == CONST_INT
2333 && (unsigned HOST_WIDE_INT
) INTVAL (trueop1
) == GET_MODE_MASK (mode
)
2334 && INTEGRAL_MODE_P (mode
))
2339 /* Optimize abs(x) < 0.0. */
2340 if (trueop1
== CONST0_RTX (mode
) && !HONOR_SNANS (mode
))
2342 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2344 if (GET_CODE (tem
) == ABS
)
2350 /* Optimize abs(x) >= 0.0. */
2351 if (trueop1
== CONST0_RTX (mode
) && !HONOR_NANS (mode
))
2353 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2355 if (GET_CODE (tem
) == ABS
)
2356 return const_true_rtx
;
2361 /* Optimize ! (abs(x) < 0.0). */
2362 if (trueop1
== CONST0_RTX (mode
))
2364 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
2366 if (GET_CODE (tem
) == ABS
)
2367 return const_true_rtx
;
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */

  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;

    case NE:
      return ! equal ? const_true_rtx : const0_rtx;

    case LT:
      return op0lt ? const_true_rtx : const0_rtx;

    case GT:
      return op1lt ? const_true_rtx : const0_rtx;

    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;

    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;

    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;

    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;

    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;

    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
2409 return const_true_rtx
;
2417 /* Simplify CODE, an operation with result mode MODE and three operands,
2418 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2419 a constant. Return 0 if no simplifications is possible. */
2422 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
2423 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
2426 unsigned int width
= GET_MODE_BITSIZE (mode
);
2428 /* VOIDmode means "infinite" precision. */
2430 width
= HOST_BITS_PER_WIDE_INT
;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
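      /* Editor's illustration (not original GCC text), assuming
	 BITS_BIG_ENDIAN is 0: (zero_extract:HI (const_int 12) (const_int 3)
	 (const_int 0)) extracts the low three bits of 0b1100 and folds to
	 (const_int 4), while (sign_extract:HI ...) of the same operands sees
	 the field's sign bit set and folds to (const_int -4).  */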
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && !HONOR_NANS (mode)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	       && !HONOR_NANS (mode)
	       && rtx_equal_p (XEXP (op0, 1), op1)
	       && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;

		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;
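      /* Editor's illustration (not original GCC text): on a target where
	 STORE_FLAG_VALUE is 1, (if_then_else (lt A B) (const_int 1)
	 (const_int 0)) folds to the comparison (lt A B) itself, and
	 (if_then_else (lt A B) (const_int 0) (const_int 1)) becomes the
	 reversed comparison (ge A B) when reversed_comparison_code can
	 reverse it.  */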
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
	{
	  elt = CONST_VECTOR_ELT (op, offset);

	  /* ?? We probably don't need this copy_rtx because constants
	     can be shared.  ?? */

	  return copy_rtx (elt);
	}
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
	       && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
	{
	  return (gen_rtx_CONST_VECTOR
		  (outermode,
		   gen_rtvec_v (GET_MODE_NUNITS (outermode),
				&CONST_VECTOR_ELT (op, offset))));
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (GET_MODE_SIZE (outermode) % elt_size == 0))
	{
	  /* This happens when the target register size is smaller than
	     the vector mode, and we synthesize operations with vectors
	     of elements that are smaller than the register size.  */
	  HOST_WIDE_INT sum = 0, high = 0;
	  unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
	  unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
	  unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
	  int shift = BITS_PER_UNIT * elt_size;
	  unsigned HOST_WIDE_INT unit_mask;

	  unit_mask = (unsigned HOST_WIDE_INT) -1
	    >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);

	  for (; n_elts--; i += step)
	    {
	      elt = CONST_VECTOR_ELT (op, i);
	      if (GET_CODE (elt) == CONST_DOUBLE
		  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
		{
		  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
					    elt);
		  if (! elt)
		    return NULL_RTX;
		}
	      if (GET_CODE (elt) != CONST_INT)
		return NULL_RTX;
	      /* Avoid overflow.  */
	      if (high >> (HOST_BITS_PER_WIDE_INT - shift))
		return NULL_RTX;
	      high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
	      sum = (sum << shift) + (INTVAL (elt) & unit_mask);
	    }
	  if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
	    return GEN_INT (trunc_int_for_mode (sum, outermode));
	  else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
	    return immed_double_const (sum, high, outermode);
	  else
	    return NULL_RTX;
	}
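      /* Editor's illustration (not original GCC text), assuming 8-bit QImode
	 elements: an SImode subreg at byte 0 of (const_vector:V4QI [1 2 3 4])
	 packs the four elements with shift = 8.  On a little-endian target
	 the loop walks from element 3 down to element 0 and produces
	 sum = 0x04030201; on a big-endian target it walks upward and
	 produces 0x01020304.  */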
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (elt_size % GET_MODE_SIZE (outermode) == 0))
	{
	  enum machine_mode new_mode
	    = int_mode_for_mode (GET_MODE_INNER (innermode));
	  int subbyte = byte % elt_size;

	  op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
	  if (! op)
	    return NULL_RTX;
	  return simplify_subreg (outermode, op, new_mode, subbyte);
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
	/* This shouldn't happen, but let's not do anything stupid.  */
	return NULL_RTX;
    }
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset = 0, part = 0;
      unsigned HOST_WIDE_INT val = 0;

      if (VECTOR_MODE_P (outermode))
	{
	  /* Construct a CONST_VECTOR from individual subregs.  */
	  enum machine_mode submode = GET_MODE_INNER (outermode);
	  int subsize = GET_MODE_UNIT_SIZE (outermode);
	  int i, elts = GET_MODE_NUNITS (outermode);
	  rtvec v = rtvec_alloc (elts);
	  rtx elt;

	  for (i = 0; i < elts; i++, byte += subsize)
	    {
	      /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
	      /* ??? It would be nice if we could actually make such subregs
		 on targets that allow such relocations.  */
	      if (byte >= GET_MODE_SIZE (innermode))
		elt = CONST0_RTX (submode);
	      else
		elt = simplify_subreg (submode, op, innermode, byte);
	      if (! elt)
		return NULL_RTX;
	      RTVEC_ELT (v, i) = elt;
	    }

	  return gen_rtx_CONST_VECTOR (outermode, v);
	}
      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
	  && GET_CODE (op) != CONST_VECTOR)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* A similar comment applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}

      if (GET_MODE_CLASS (outermode) != MODE_INT
	  && GET_MODE_CLASS (outermode) != MODE_CC)
	{
	  enum machine_mode new_mode = int_mode_for_mode (outermode);

	  if (new_mode != innermode || byte != 0)
	    {
	      op = simplify_subreg (new_mode, op, innermode, byte);
	      if (! op)
		return NULL_RTX;
	      return simplify_subreg (outermode, op, new_mode, 0);
	    }
	}
      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = ! part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;

	  val >>= offset;

	  if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
	    val = trunc_int_for_mode (val, outermode);
	  return GEN_INT (val);

	default:
	  break;
	}
    }
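  /* Editor's illustration (not original GCC text): on a little-endian
     target, simplify_subreg (QImode, (const_int 0x12345678), SImode, 1)
     selects byte 1, so offset becomes 8 bits, val >>= 8 gives 0x123456, and
     truncation to QImode yields (const_int 0x56).  On a big-endian SImode
     source the offset is first remapped so that byte 0 names the most
     significant byte.  */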
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
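  /* Editor's illustration (not original GCC text): on a little-endian
     target, (subreg:QI (subreg:HI (reg:SI R) 2) 1) has final_offset
     1 + 2 = 3, and the recursion folds it to (subreg:QI (reg:SI R) 3),
     i.e. the most significant byte of R.  */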
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
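/* Editor's illustration (not original GCC text), assuming an 8-byte DFmode:
   for a DCmode CONCAT of two DFmode parts, (subreg:DF (concat:DC X Y) 0)
   selects the real part X and (subreg:DF (concat:DC X Y) 8) selects the
   imaginary part Y, since GET_MODE_UNIT_SIZE (DCmode) is 8.  */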
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
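/* Editor's note (not original GCC text): a typical call is
   simplify_gen_subreg (SImode, x, DImode, 4).  When simplify_subreg can fold
   it (constant operand, adjustable MEM, hard register, nested SUBREG) the
   folded rtx is returned; otherwise the result is (subreg:SI (x:DI) 4),
   which on a little-endian target names the high word.  */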
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	{
	  rtx tem;

	  tem = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tem;
	  return simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	}

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));

    case 'x':
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
	{
	  if (CONSTANT_P (XEXP (x, 0)))