/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
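
/* For example, a CONST_INT is widened to a (low, high) pair with
   "l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);", as done in the
   double-word constant-folding code further below.  */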
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
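
/* For example, negating (const_int -2147483648) in SImode yields
   (const_int -2147483648) again: gen_int_mode truncates the result
   to MODE instead of signalling the overflow.  */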
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
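
/* For example, for SImode this accepts (const_int -2147483648): the only
   set bit within the 32-bit mode mask is bit 31, the sign bit.  */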
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
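
/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to
   X itself rather than building (plus:SI x (const_int 0)).  */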
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
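
/* For example, a MEM whose address is a constant-pool SYMBOL_REF holding a
   DFmode constant is replaced by that CONST_DOUBLE, so the callers below
   can fold it like any other constant operand.  */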
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
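
/* For example, replacing (reg R) with (const_int 4) in
   (plus:SI (reg R) (const_int -4)) substitutes recursively and then lets
   simplify_gen_binary fold the result down to (const_int 0).  */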
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
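
/* For example, simplify_unary_operation (NEG, SImode, const1_rtx, SImode)
   is folded by the constant case below to (const_int -1).  */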
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == -1
          && COMPARISON_P (op)
          && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
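
/* For example, simplify_binary_operation (PLUS, SImode, x, const0_rtx)
   returns X, and (MINUS, SImode, x, x) folds to (const_int 0) as long as
   X has no side effects, as handled case by case below.  */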
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
        return tem;

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      else if (GET_CODE (trueop1) == CONST_DOUBLE
               && (GET_MODE (trueop1) == VOIDmode
                   || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
               && GET_MODE (op0) == mode
               && CONST_DOUBLE_LOW (trueop1) == 0
               && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (trueop0 == trueop1
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (trueop0 == trueop1 && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                      == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

      /* Fall through....  */

    case ASHIFT:
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      break;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (GET_CODE (x) == CONST_INT);
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      return 0;

    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || GET_CODE (trueop0) == CONST_INT
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || GET_CODE (trueop1) == CONST_INT
                || GET_CODE (trueop1) == CONST_DOUBLE))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
      }
      return 0;

    default:
      break;
    }

  return 0;
}
2056 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2059 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2061 unsigned int width
= GET_MODE_BITSIZE (mode
);
2063 if (VECTOR_MODE_P (mode
)
2064 && code
!= VEC_CONCAT
2065 && GET_CODE (op0
) == CONST_VECTOR
2066 && GET_CODE (op1
) == CONST_VECTOR
)
2068 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2069 enum machine_mode op0mode
= GET_MODE (op0
);
2070 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2071 enum machine_mode op1mode
= GET_MODE (op1
);
2072 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2073 rtvec v
= rtvec_alloc (n_elts
);
2076 gcc_assert (op0_n_elts
== n_elts
);
2077 gcc_assert (op1_n_elts
== n_elts
);
2078 for (i
= 0; i
< n_elts
; i
++)
2080 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2081 CONST_VECTOR_ELT (op0
, i
),
2082 CONST_VECTOR_ELT (op1
, i
));
2085 RTVEC_ELT (v
, i
) = x
;
2088 return gen_rtx_CONST_VECTOR (mode
, v
);
2091 if (VECTOR_MODE_P (mode
)
2092 && code
== VEC_CONCAT
2093 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2095 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2096 rtvec v
= rtvec_alloc (n_elts
);
2098 gcc_assert (n_elts
>= 2);
2101 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2102 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2104 RTVEC_ELT (v
, 0) = op0
;
2105 RTVEC_ELT (v
, 1) = op1
;
2109 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2110 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2113 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2114 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2115 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2117 for (i
= 0; i
< op0_n_elts
; ++i
)
2118 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2119 for (i
= 0; i
< op1_n_elts
; ++i
)
2120 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2123 return gen_rtx_CONST_VECTOR (mode
, v
);
2126   if (GET_MODE_CLASS (mode) == MODE_FLOAT
2127       && GET_CODE (op0) == CONST_DOUBLE
2128       && GET_CODE (op1) == CONST_DOUBLE
2129       && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2140           real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2142           real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2144           for (i = 0; i < 4; i++)
2161           real_from_target (&r, tmp0, mode);
2162           return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2166           REAL_VALUE_TYPE f0, f1, value, result;
2169           REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2170           REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2171           real_convert (&f0, mode, &f0);
2172           real_convert (&f1, mode, &f1);
2174           if (HONOR_SNANS (mode)
2175               && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2179               && REAL_VALUES_EQUAL (f1, dconst0)
2180               && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2183           if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2184               && flag_trapping_math
2185               && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2187               int s0 = REAL_VALUE_NEGATIVE (f0);
2188               int s1 = REAL_VALUE_NEGATIVE (f1);
2193                   /* Inf + -Inf = NaN plus exception.  */
2198                   /* Inf - Inf = NaN plus exception.  */
2203                   /* Inf / Inf = NaN plus exception.  */
2210           if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2211               && flag_trapping_math
2212               && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2213                   || (REAL_VALUE_ISINF (f1)
2214                       && REAL_VALUES_EQUAL (f0, dconst0))))
2215             /* Inf * 0 = NaN plus exception.  */
2218           inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2220           real_convert (&result, mode, &value);
2222           /* Don't constant fold this floating point operation if the
2223              result may depend upon the run-time rounding mode and
2224              flag_rounding_math is set, or if GCC's software emulation
2225              is unable to accurately represent the result.  */
2227           if ((flag_rounding_math
2228                || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2229                    && !flag_unsafe_math_optimizations))
2230               && (inexact || !real_identical (&result, &value)))
2233           return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
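/* Added note (not part of the original source): folding is suppressed above
   when the computed value is inexact and the run-time rounding mode could
   change the answer.  A rough stand-alone illustration of "detect whether a
   result was inexact", using only ISO C <fenv.h> (hypothetical helper, not
   GCC's real.c machinery; strict conformance also wants
   #pragma STDC FENV_ACCESS ON):  */
#if 0
#include <fenv.h>

static int
add_was_inexact (double a, double b, double *result)
{
  feclearexcept (FE_INEXACT);
  *result = a + b;
  /* Nonzero when A + B could not be represented exactly, i.e. the value
     produced depends on the rounding mode in effect.  */
  return fetestexcept (FE_INEXACT) != 0;
}
#endif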
2237   /* We can fold some multi-word operations.  */
2238   if (GET_MODE_CLASS (mode) == MODE_INT
2239       && width == HOST_BITS_PER_WIDE_INT * 2
2240       && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2241       && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2243       unsigned HOST_WIDE_INT l1, l2, lv, lt;
2244       HOST_WIDE_INT h1, h2, hv, ht;
2246       if (GET_CODE (op0) == CONST_DOUBLE)
2247         l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2249         l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2251       if (GET_CODE (op1) == CONST_DOUBLE)
2252         l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2254         l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2259           /* A - B == A + (-B).  */
2260           neg_double (l2, h2, &lv, &hv);
2263           /* Fall through....  */
2266           add_double (l1, h1, l2, h2, &lv, &hv);
2270           mul_double (l1, h1, l2, h2, &lv, &hv);
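/* Added note (not part of the original source): a double-width constant is
   carried around as an unsigned low word plus a signed high word, with
   HWI_SIGN_EXTEND filling the high word when the input was a narrow
   CONST_INT.  A self-contained sketch of that representation and of an
   add_double-style addition with carry, using 64-bit words (hypothetical
   helper names):  */
#if 0
#include <stdint.h>

struct dword { uint64_t lo; int64_t hi; };

static struct dword
dword_from_word (int64_t v)
{
  struct dword r;
  r.lo = (uint64_t) v;
  r.hi = v < 0 ? -1 : 0;       /* analogue of HWI_SIGN_EXTEND */
  return r;
}

static struct dword
dword_add (struct dword a, struct dword b)
{
  struct dword r;
  r.lo = a.lo + b.lo;
  /* Carry out of the low word iff the unsigned sum wrapped around.  */
  r.hi = a.hi + b.hi + (r.lo < a.lo);
  return r;
}
#endif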
2274           if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2275                                     &lv, &hv, &lt, &ht))
2280           if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2281                                     &lt, &ht, &lv, &hv))
2286           if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2287                                     &lv, &hv, &lt, &ht))
2292           if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2293                                     &lt, &ht, &lv, &hv))
2298           lv = l1 & l2, hv = h1 & h2;
2302           lv = l1 | l2, hv = h1 | h2;
2306           lv = l1 ^ l2, hv = h1 ^ h2;
2312               && ((unsigned HOST_WIDE_INT) l1
2313                   < (unsigned HOST_WIDE_INT) l2)))
2322               && ((unsigned HOST_WIDE_INT) l1
2323                   > (unsigned HOST_WIDE_INT) l2)))
2330           if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2332                   && ((unsigned HOST_WIDE_INT) l1
2333                       < (unsigned HOST_WIDE_INT) l2)))
2340           if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2342                   && ((unsigned HOST_WIDE_INT) l1
2343                       > (unsigned HOST_WIDE_INT) l2)))
2349         case LSHIFTRT:   case ASHIFTRT:
2351         case ROTATE:     case ROTATERT:
2352           if (SHIFT_COUNT_TRUNCATED)
2353             l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2355           if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2358           if (code == LSHIFTRT || code == ASHIFTRT)
2359             rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2361           else if (code == ASHIFT)
2362             lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2363           else if (code == ROTATE)
2364             lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2365           else /* code == ROTATERT */
2366             rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2373       return immed_double_const (lv, hv, mode);
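/* Added note (not part of the original source): lrotate_double/rrotate_double
   above rotate the double-word value.  For a single word the same effect is
   the two-shift form used later in this function for ROTATE/ROTATERT; a
   stand-alone sketch for a full 64-bit word (hypothetical helper; COUNT must
   satisfy 0 < COUNT < 64 to avoid an out-of-range shift):  */
#if 0
#include <stdint.h>

static uint64_t
rotate_left_64 (uint64_t x, unsigned count)
{
  /* Bits shifted out at the top re-enter at the bottom.  */
  return (x << count) | (x >> (64 - count));
}
#endif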
2376   if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2377       && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2379       /* Get the integer argument values in two forms:
2380          zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
2382       arg0 = INTVAL (op0);
2383       arg1 = INTVAL (op1);
2385       if (width < HOST_BITS_PER_WIDE_INT)
2387           arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2388           arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2391           if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2392             arg0s |= ((HOST_WIDE_INT) (-1) << width);
2395           if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2396             arg1s |= ((HOST_WIDE_INT) (-1) << width);
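/* Added note (not part of the original source): the masking and OR just above
   produce ARG0/ARG1 zero-extended and ARG0S/ARG1S sign-extended from WIDTH
   bits.  A stand-alone sketch of the same sign-extension trick (hypothetical
   helper; WIDTH must satisfy 0 < WIDTH < 64):  */
#if 0
#include <stdint.h>

static int64_t
sign_extend_from_width (uint64_t value, unsigned width)
{
  uint64_t mask = (UINT64_C (1) << width) - 1;
  uint64_t v = value & mask;                 /* zero-extended form */

  /* If the narrow sign bit is set, fill the high-order bits with ones.  */
  if (v & (UINT64_C (1) << (width - 1)))
    v |= ~mask;
  return (int64_t) v;
}
#endif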
2404       /* Compute the value of the arithmetic.  */
2409           val = arg0s + arg1s;
2413           val = arg0s - arg1s;
2417           val = arg0s * arg1s;
2422               || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2425           val = arg0s / arg1s;
2430               || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2433           val = arg0s % arg1s;
2438               || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2441           val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2446               || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2449           val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2467           /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2468              the value is in range.  We can't return any old value for
2469              out-of-range arguments because either the middle-end (via
2470              shift_truncation_mask) or the back-end might be relying on
2471              target-specific knowledge.  Nor can we rely on
2472              shift_truncation_mask, since the shift might not be part of an
2473              ashlM3, lshrM3 or ashrM3 instruction.  */
2474           if (SHIFT_COUNT_TRUNCATED)
2475             arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2476           else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2479           val = (code == ASHIFT
2480                  ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2481                  : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2483           /* Sign-extend the result for arithmetic right shifts.  */
2484           if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2485             val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2493           val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2494                  | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2502           val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2503                  | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2507           /* Do nothing here.  */
2511           val = arg0s <= arg1s ? arg0s : arg1s;
2515           val = ((unsigned HOST_WIDE_INT) arg0
2516                  <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2520           val = arg0s > arg1s ? arg0s : arg1s;
2524           val = ((unsigned HOST_WIDE_INT) arg0
2525                  > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2532           /* ??? There are simplifications that can be done.  */
2539   return gen_int_mode (val, mode);
2547 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2550    Rather than test for specific cases, we do this by a brute-force method
2551    and do all possible simplifications until no more changes occur.  Then
2552    we rebuild the operation.
2554    If FORCE is true, then always generate the rtx.  This is used to
2555    canonicalize stuff emitted from simplify_gen_binary.  Note that this
2556    can still fail if the rtx is too complex.  It won't fail just because
2557    the result is not 'simpler' than the input, however.  */
2559 struct simplify_plus_minus_op_data
2566 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2568   const struct simplify_plus_minus_op_data *d1 = p1;
2569   const struct simplify_plus_minus_op_data *d2 = p2;
2571   return (commutative_operand_precedence (d2->op)
2572           - commutative_operand_precedence (d1->op));
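/* Added note (not part of the original source): the comparator above orders
   entries so that higher commutative_operand_precedence sorts first, and is
   handed to qsort later in simplify_plus_minus.  A self-contained reminder
   of the qsort comparator contract (hypothetical example sorting ints in
   descending order):  */
#if 0
#include <stdlib.h>

static int
desc_int_cmp (const void *p1, const void *p2)
{
  int a = *(const int *) p1;
  int b = *(const int *) p2;

  /* Negative when the first element should sort before the second.  */
  return (a > b) ? -1 : (a < b) ? 1 : 0;
}

static void
sort_descending (int *v, size_t n)
{
  qsort (v, n, sizeof *v, desc_int_cmp);
}
#endif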
2576 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2579   struct simplify_plus_minus_op_data ops[8];
2581   int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2585   memset (ops, 0, sizeof ops);
2587   /* Set up the two operands and then expand them until nothing has been
2588      changed.  If we run out of room in our array, give up; this should
2589      almost never happen.  */
2594   ops[1].neg = (code == MINUS);
2600       for (i = 0; i < n_ops; i++)
2602           rtx this_op = ops[i].op;
2603           int this_neg = ops[i].neg;
2604           enum rtx_code this_code = GET_CODE (this_op);
2613               ops[n_ops].op = XEXP (this_op, 1);
2614               ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2617               ops[i].op = XEXP (this_op, 0);
2623               ops[i].op = XEXP (this_op, 0);
2624               ops[i].neg = ! this_neg;
2630                   && GET_CODE (XEXP (this_op, 0)) == PLUS
2631                   && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2632                   && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2634                   ops[i].op = XEXP (XEXP (this_op, 0), 0);
2635                   ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2636                   ops[n_ops].neg = this_neg;
2644               /* ~a -> (-a - 1) */
2647                   ops[n_ops].op = constm1_rtx;
2648                   ops[n_ops++].neg = this_neg;
2649                   ops[i].op = XEXP (this_op, 0);
2650                   ops[i].neg = !this_neg;
2658                   ops[i].op = neg_const_int (mode, this_op);
2671   /* If we only have two operands, we can't do anything.  */
2672   if (n_ops <= 2 && !force)
2675   /* Count the number of CONSTs we didn't split above.  */
2676   for (i = 0; i < n_ops; i++)
2677     if (GET_CODE (ops[i].op) == CONST)
2680   /* Now simplify each pair of operands until nothing changes.  The first
2681      time through just simplify constants against each other.  */
2688       for (i = 0; i < n_ops - 1; i++)
2689         for (j = i + 1; j < n_ops; j++)
2691             rtx lhs = ops[i].op, rhs = ops[j].op;
2692             int lneg = ops[i].neg, rneg = ops[j].neg;
2694             if (lhs != 0 && rhs != 0
2695                 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2697                 enum rtx_code ncode = PLUS;
2703                     tem = lhs, lhs = rhs, rhs = tem;
2705                 else if (swap_commutative_operands_p (lhs, rhs))
2706                   tem = lhs, lhs = rhs, rhs = tem;
2708                 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2710                 /* Reject "simplifications" that just wrap the two
2711                    arguments in a CONST.  Failure to do so can result
2712                    in infinite recursion with simplify_binary_operation
2713                    when it calls us to simplify CONST operations.  */
2715                     && ! (GET_CODE (tem) == CONST
2716                           && GET_CODE (XEXP (tem, 0)) == ncode
2717                           && XEXP (XEXP (tem, 0), 0) == lhs
2718                           && XEXP (XEXP (tem, 0), 1) == rhs)
2719                     /* Don't allow -x + -1 -> ~x simplifications in the
2720                        first pass.  This allows us the chance to combine
2721                        the -1 with other constants.  */
2723                           && GET_CODE (tem) == NOT
2724                           && XEXP (tem, 0) == rhs))
2727                     if (GET_CODE (tem) == NEG)
2728                       tem = XEXP (tem, 0), lneg = !lneg;
2729                     if (GET_CODE (tem) == CONST_INT && lneg)
2730                       tem = neg_const_int (mode, tem), lneg = 0;
2734                     ops[j].op = NULL_RTX;
2744   /* Pack all the operands to the lower-numbered entries.  */
2745   for (i = 0, j = 0; j < n_ops; j++)
2750   /* Sort the operations based on swap_commutative_operands_p.  */
2751   qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2753   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
2755       && GET_CODE (ops[1].op) == CONST_INT
2756       && CONSTANT_P (ops[0].op)
2758     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2760   /* We suppressed creation of trivial CONST expressions in the
2761      combination loop to avoid recursion.  Create one manually now.
2762      The combination loop should have ensured that there is exactly
2763      one CONST_INT, and the sort will have ensured that it is last
2764      in the array and that any other constant will be next-to-last.  */
2767       && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2768       && CONSTANT_P (ops[n_ops - 2].op))
2770       rtx value = ops[n_ops - 1].op;
2771       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2772         value = neg_const_int (mode, value);
2773       ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2777   /* Count the number of CONSTs that we generated.  */
2779   for (i = 0; i < n_ops; i++)
2780     if (GET_CODE (ops[i].op) == CONST)
2783   /* Give up if we didn't reduce the number of operands we had.  Make
2784      sure we count a CONST as two operands.  If we have the same
2785      number of operands, but have made more CONSTs than before, this
2786      is also an improvement, so accept it.  */
2788       && (n_ops + n_consts > input_ops
2789           || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2792   /* Put a non-negated operand first, if possible.  */
2794   for (i = 0; i < n_ops && ops[i].neg; i++)
2797       ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2806   /* Now make the result by performing the requested operations.  */
2808   for (i = 1; i < n_ops; i++)
2809     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2810                              mode, result, ops[i].op);
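/* Added note (not part of the original source): simplify_plus_minus works by
   flattening a nested PLUS/MINUS tree into a flat array of (operand, negate)
   pairs, repeatedly simplifying pairs, and then rebuilding the expression.
   A tiny stand-alone sketch of the flat representation, folding just the
   constant terms (hypothetical helper; the real operands are rtx, not
   longs):  */
#if 0
#include <stddef.h>

struct term { long value; int neg; };

static long
fold_constant_terms (const struct term *ops, size_t n_ops)
{
  long sum = 0;
  size_t i;

  /* Each entry contributes +value or -value, mirroring ops[i].neg.  */
  for (i = 0; i < n_ops; i++)
    sum += ops[i].neg ? -ops[i].value : ops[i].value;
  return sum;
}
#endif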
2815 /* Check whether an operand is suitable for calling simplify_plus_minus.  */
2817 plus_minus_operand_p (rtx x)
2819   return GET_CODE (x) == PLUS
2820          || GET_CODE (x) == MINUS
2821          || (GET_CODE (x) == CONST
2822              && GET_CODE (XEXP (x, 0)) == PLUS
2823              && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2824              && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2827 /* Like simplify_binary_operation except used for relational operators.
2828    MODE is the mode of the result.  If MODE is VOIDmode, the operands must
2829    not both be VOIDmode.
2831    CMP_MODE specifies the mode in which the comparison is done, so it is
2832    the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
2833    the operands or, if both are VOIDmode, the operands are compared in
2834    "infinite precision".  */
2836 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2837                                enum machine_mode cmp_mode, rtx op0, rtx op1)
2839 rtx tem
, trueop0
, trueop1
;
2841 if (cmp_mode
== VOIDmode
)
2842 cmp_mode
= GET_MODE (op0
);
2843 if (cmp_mode
== VOIDmode
)
2844 cmp_mode
= GET_MODE (op1
);
2846 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
2849 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2851 if (tem
== const0_rtx
)
2852 return CONST0_RTX (mode
);
2853 #ifdef FLOAT_STORE_FLAG_VALUE
2855 REAL_VALUE_TYPE val
;
2856 val
= FLOAT_STORE_FLAG_VALUE (mode
);
2857 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
2863 if (VECTOR_MODE_P (mode
))
2865 if (tem
== const0_rtx
)
2866 return CONST0_RTX (mode
);
2867 #ifdef VECTOR_STORE_FLAG_VALUE
2872 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
2873 if (val
== NULL_RTX
)
2875 if (val
== const1_rtx
)
2876 return CONST1_RTX (mode
);
2878 units
= GET_MODE_NUNITS (mode
);
2879 v
= rtvec_alloc (units
);
2880 for (i
= 0; i
< units
; i
++)
2881 RTVEC_ELT (v
, i
) = val
;
2882 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
2892 /* For the following tests, ensure const0_rtx is op1. */
2893 if (swap_commutative_operands_p (op0
, op1
)
2894 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
2895 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
2897 /* If op0 is a compare, extract the comparison arguments from it. */
2898 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
2899 return simplify_relational_operation (code
, mode
, VOIDmode
,
2900 XEXP (op0
, 0), XEXP (op0
, 1));
2902 if (mode
== VOIDmode
2903 || GET_MODE_CLASS (cmp_mode
) == MODE_CC
2907 trueop0
= avoid_constant_pool_reference (op0
);
2908 trueop1
= avoid_constant_pool_reference (op1
);
2909 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
2913 /* This part of simplify_relational_operation is only used when CMP_MODE
2914    is not in class MODE_CC (i.e. it is a real comparison).
2916    MODE is the mode of the result, while CMP_MODE specifies the mode in
2917    which the comparison is done, so it is the mode of the operands.  */
2920 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
2921 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
2923 enum rtx_code op0code
= GET_CODE (op0
);
2925 if (GET_CODE (op1
) == CONST_INT
)
2927 if (INTVAL (op1
) == 0 && COMPARISON_P (op0
))
2929       /* If op0 is a comparison, extract the comparison arguments from it.  */
2932 if (GET_MODE (op0
) == mode
)
2933 return simplify_rtx (op0
);
2935 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
2936 XEXP (op0
, 0), XEXP (op0
, 1));
2938 else if (code
== EQ
)
2940 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
2941 if (new_code
!= UNKNOWN
)
2942 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
2943 XEXP (op0
, 0), XEXP (op0
, 1));
2948   /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
2949   if ((code == EQ || code == NE)
2950       && (op0code == PLUS || op0code == MINUS)
2952       && CONSTANT_P (XEXP (op0, 1))
2953       && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2955       rtx x = XEXP (op0, 0);
2956       rtx c = XEXP (op0, 1);
2958       c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2960       return simplify_gen_relational (code, mode, cmp_mode, x, c);
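/* Added note (not part of the original source): the transformation above
   rewrites (eq/ne (plus x cst1) cst2) as (eq/ne x (cst2 - cst1)), moving the
   constant to the other side of the comparison.  For wrapping (modular)
   integer arithmetic this is an equivalence; a stand-alone sketch
   (hypothetical helper):  */
#if 0
#include <stdint.h>

static int
eq_plus_const (uint32_t x, uint32_t cst1, uint32_t cst2)
{
  /* (x + cst1) == cst2  is equivalent to  x == (cst2 - cst1)
     because unsigned addition is invertible modulo 2^32.  */
  return x == cst2 - cst1;
}
#endif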
2963 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
2964 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2966 && op1
== const0_rtx
2967 && GET_MODE_CLASS (mode
) == MODE_INT
2968 && cmp_mode
!= VOIDmode
2969 /* ??? Work-around BImode bugs in the ia64 backend. */
2971 && cmp_mode
!= BImode
2972 && nonzero_bits (op0
, cmp_mode
) == 1
2973 && STORE_FLAG_VALUE
== 1)
2974 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
2975 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
2976 : lowpart_subreg (mode
, op0
, cmp_mode
);
2981 /* Check if the given comparison (done in the given MODE) is actually a
2982 tautology or a contradiction.
2983 If no simplification is possible, this function returns zero.
2984 Otherwise, it returns either const_true_rtx or const0_rtx. */
2987 simplify_const_relational_operation (enum rtx_code code
,
2988 enum machine_mode mode
,
2991 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
2996 gcc_assert (mode
!= VOIDmode
2997 || (GET_MODE (op0
) == VOIDmode
2998 && GET_MODE (op1
) == VOIDmode
));
3000 /* If op0 is a compare, extract the comparison arguments from it. */
3001 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3002 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
3004 /* We can't simplify MODE_CC values since we don't know what the
3005 actual comparison is. */
3006 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3009 /* Make sure the constant is second. */
3010 if (swap_commutative_operands_p (op0
, op1
))
3012 tem
= op0
, op0
= op1
, op1
= tem
;
3013 code
= swap_condition (code
);
3016 trueop0
= avoid_constant_pool_reference (op0
);
3017 trueop1
= avoid_constant_pool_reference (op1
);
3019 /* For integer comparisons of A and B maybe we can simplify A - B and can
3020 then simplify a comparison of that with zero. If A and B are both either
3021 a register or a CONST_INT, this can't help; testing for these cases will
3022 prevent infinite recursion here and speed things up.
3024 If CODE is an unsigned comparison, then we can never do this optimization,
3025 because it gives an incorrect result if the subtraction wraps around zero.
3026 ANSI C defines unsigned operations such that they never overflow, and
3027 thus such cases can not be ignored; but we cannot do it even for
3028 signed comparisons for languages such as Java, so test flag_wrapv. */
3030 if (!flag_wrapv
&& INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3031 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3032 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3033 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3034 /* We cannot do this for == or != if tem is a nonzero address. */
3035 && ((code
!= EQ
&& code
!= NE
) || ! nonzero_address_p (tem
))
3036 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
3037 return simplify_const_relational_operation (signed_condition (code
),
3038 mode
, tem
, const0_rtx
);
3040 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
3041 return const_true_rtx
;
3043 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
3046 /* For modes without NaNs, if the two operands are equal, we know the
3047 result except if they have side-effects. */
3048 if (! HONOR_NANS (GET_MODE (trueop0
))
3049 && rtx_equal_p (trueop0
, trueop1
)
3050 && ! side_effects_p (trueop0
))
3051 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3053 /* If the operands are floating-point constants, see if we can fold
3055 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3056 && GET_CODE (trueop1
) == CONST_DOUBLE
3057 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
3059 REAL_VALUE_TYPE d0
, d1
;
3061 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3062 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3064 /* Comparisons are unordered iff at least one of the values is NaN. */
3065 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3075 return const_true_rtx
;
3088 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3089 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3090 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3093 /* Otherwise, see if the operands are both integers. */
3094 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3095 && (GET_CODE (trueop0
) == CONST_DOUBLE
3096 || GET_CODE (trueop0
) == CONST_INT
)
3097 && (GET_CODE (trueop1
) == CONST_DOUBLE
3098 || GET_CODE (trueop1
) == CONST_INT
))
3100 int width
= GET_MODE_BITSIZE (mode
);
3101 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3102 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3104 /* Get the two words comprising each integer constant. */
3105 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3107 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3108 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3112 l0u
= l0s
= INTVAL (trueop0
);
3113 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3116 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3118 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3119 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3123 l1u
= l1s
= INTVAL (trueop1
);
3124 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
3127       /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3128          we have to sign or zero-extend the values.  */
3129       if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3131           l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3132           l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3134           if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3135             l0s |= ((HOST_WIDE_INT) (-1) << width);
3137           if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3138             l1s |= ((HOST_WIDE_INT) (-1) << width);
3140       if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3141         h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3143       equal = (h0u == h1u && l0u == l1u);
3144       op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3145       op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3146       op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3147       op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
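/* Added note (not part of the original source): the comparisons just above
   decide the ordering of two double-word constants from their (high, low)
   word pairs: compare the high words first, and fall back to an unsigned
   comparison of the low words when the high words are equal.  A
   self-contained sketch (hypothetical helpers):  */
#if 0
#include <stdint.h>

static int
dword_lt_signed (int64_t h0, uint64_t l0, int64_t h1, uint64_t l1)
{
  return h0 < h1 || (h0 == h1 && l0 < l1);
}

static int
dword_lt_unsigned (uint64_t h0, uint64_t l0, uint64_t h1, uint64_t l1)
{
  return h0 < h1 || (h0 == h1 && l0 < l1);
}
#endif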
3150 /* Otherwise, there are some code-specific tests we can make. */
3153 /* Optimize comparisons with upper and lower bounds. */
3154 if (SCALAR_INT_MODE_P (mode
)
3155 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3168 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3175 /* x >= min is always true. */
3176 if (rtx_equal_p (trueop1
, mmin
))
3177 tem
= const_true_rtx
;
3183 /* x <= max is always true. */
3184 if (rtx_equal_p (trueop1
, mmax
))
3185 tem
= const_true_rtx
;
3190 /* x > max is always false. */
3191 if (rtx_equal_p (trueop1
, mmax
))
3197 /* x < min is always false. */
3198 if (rtx_equal_p (trueop1
, mmin
))
3205 if (tem
== const0_rtx
3206 || tem
== const_true_rtx
)
3213 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3218 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3219 return const_true_rtx
;
3223 /* Optimize abs(x) < 0.0. */
3224 if (trueop1
== CONST0_RTX (mode
) && !HONOR_SNANS (mode
))
3226 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3228 if (GET_CODE (tem
) == ABS
)
3234 /* Optimize abs(x) >= 0.0. */
3235 if (trueop1
== CONST0_RTX (mode
) && !HONOR_NANS (mode
))
3237 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3239 if (GET_CODE (tem
) == ABS
)
3240 return const_true_rtx
;
3245 /* Optimize ! (abs(x) < 0.0). */
3246 if (trueop1
== CONST0_RTX (mode
))
3248 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3250 if (GET_CODE (tem
) == ABS
)
3251 return const_true_rtx
;
3262   /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3268       return equal ? const_true_rtx : const0_rtx;
3271       return ! equal ? const_true_rtx : const0_rtx;
3274       return op0lt ? const_true_rtx : const0_rtx;
3277       return op1lt ? const_true_rtx : const0_rtx;
3279       return op0ltu ? const_true_rtx : const0_rtx;
3281       return op1ltu ? const_true_rtx : const0_rtx;
3284       return equal || op0lt ? const_true_rtx : const0_rtx;
3287       return equal || op1lt ? const_true_rtx : const0_rtx;
3289       return equal || op0ltu ? const_true_rtx : const0_rtx;
3291       return equal || op1ltu ? const_true_rtx : const0_rtx;
3293       return const_true_rtx;
3301 /* Simplify CODE, an operation with result mode MODE and three operands,
3302    OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
3303    a constant.  Return 0 if no simplification is possible.  */
3306 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3307                             enum machine_mode op0_mode, rtx op0, rtx op1,
3310   unsigned int width = GET_MODE_BITSIZE (mode);
3312   /* VOIDmode means "infinite" precision.  */
3314     width = HOST_BITS_PER_WIDE_INT;
3320       if (GET_CODE (op0) == CONST_INT
3321           && GET_CODE (op1) == CONST_INT
3322           && GET_CODE (op2) == CONST_INT
3323           && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3324           && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3326           /* Extracting a bit-field from a constant */
3327           HOST_WIDE_INT val = INTVAL (op0);
3329           if (BITS_BIG_ENDIAN)
3330             val >>= (GET_MODE_BITSIZE (op0_mode)
3331                      - INTVAL (op2) - INTVAL (op1));
3333             val >>= INTVAL (op2);
3335           if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3337               /* First zero-extend.  */
3338               val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3339               /* If desired, propagate sign bit.  */
3340               if (code == SIGN_EXTRACT
3341                   && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3342                 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3345           /* Clear the bits that don't belong in our mode,
3346              unless they and our sign bit are all one.
3347              So we get either a reasonable negative value or a reasonable
3348              unsigned value for this mode.  */
3349           if (width < HOST_BITS_PER_WIDE_INT
3350               && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3351                   != ((HOST_WIDE_INT) (-1) << (width - 1))))
3352             val &= ((HOST_WIDE_INT) 1 << width) - 1;
3354           return gen_int_mode (val, mode);
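/* Added note (not part of the original source): the code above extracts a
   bit-field of OP1 bits starting at bit OP2 from a constant: shift the
   constant right, mask to the field width, and for SIGN_EXTRACT propagate
   the field's sign bit.  A stand-alone sketch (hypothetical helper;
   0 < LEN < 64 and POS + LEN <= 64):  */
#if 0
#include <stdint.h>

static int64_t
extract_bit_field (uint64_t value, unsigned pos, unsigned len, int sign_extract)
{
  uint64_t field = (value >> pos) & ((UINT64_C (1) << len) - 1);

  /* For a signed extraction, copy the field's top bit into the high bits.  */
  if (sign_extract && (field & (UINT64_C (1) << (len - 1))))
    field |= ~((UINT64_C (1) << len) - 1);
  return (int64_t) field;
}
#endif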
3359 if (GET_CODE (op0
) == CONST_INT
)
3360 return op0
!= const0_rtx
? op1
: op2
;
3362 /* Convert c ? a : a into "a". */
3363 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
3366 /* Convert a != b ? a : b into "a". */
3367 if (GET_CODE (op0
) == NE
3368 && ! side_effects_p (op0
)
3369 && ! HONOR_NANS (mode
)
3370 && ! HONOR_SIGNED_ZEROS (mode
)
3371 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3372 && rtx_equal_p (XEXP (op0
, 1), op2
))
3373 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3374 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3377 /* Convert a == b ? a : b into "b". */
3378 if (GET_CODE (op0
) == EQ
3379 && ! side_effects_p (op0
)
3380 && ! HONOR_NANS (mode
)
3381 && ! HONOR_SIGNED_ZEROS (mode
)
3382 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3383 && rtx_equal_p (XEXP (op0
, 1), op2
))
3384 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3385 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3388 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
3390 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
3391 ? GET_MODE (XEXP (op0
, 1))
3392 : GET_MODE (XEXP (op0
, 0)));
3395 /* Look for happy constants in op1 and op2. */
3396 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
3398 HOST_WIDE_INT t
= INTVAL (op1
);
3399 HOST_WIDE_INT f
= INTVAL (op2
);
3401 if (t
== STORE_FLAG_VALUE
&& f
== 0)
3402 code
= GET_CODE (op0
);
3403 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
3406 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
3414 return simplify_gen_relational (code
, mode
, cmp_mode
,
3415 XEXP (op0
, 0), XEXP (op0
, 1));
3418 if (cmp_mode
== VOIDmode
)
3419 cmp_mode
= op0_mode
;
3420 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
3421 cmp_mode
, XEXP (op0
, 0),
3424 /* See if any simplifications were possible. */
3427 if (GET_CODE (temp
) == CONST_INT
)
3428 return temp
== const0_rtx
? op2
: op1
;
3430 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
3436 gcc_assert (GET_MODE (op0
) == mode
);
3437 gcc_assert (GET_MODE (op1
) == mode
);
3438 gcc_assert (VECTOR_MODE_P (mode
));
3439 op2
= avoid_constant_pool_reference (op2
);
3440 if (GET_CODE (op2
) == CONST_INT
)
3442 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3443 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3444 int mask
= (1 << n_elts
) - 1;
3446 if (!(INTVAL (op2
) & mask
))
3448 if ((INTVAL (op2
) & mask
) == mask
)
3451 op0
= avoid_constant_pool_reference (op0
);
3452 op1
= avoid_constant_pool_reference (op1
);
3453 if (GET_CODE (op0
) == CONST_VECTOR
3454 && GET_CODE (op1
) == CONST_VECTOR
)
3456 rtvec v
= rtvec_alloc (n_elts
);
3459 for (i
= 0; i
< n_elts
; i
++)
3460 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
3461 ? CONST_VECTOR_ELT (op0
, i
)
3462 : CONST_VECTOR_ELT (op1
, i
));
3463 return gen_rtx_CONST_VECTOR (mode
, v
);
3475 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3476 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3478 Works by unpacking OP into a collection of 8-bit values
3479 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3480 and then repacking them again for OUTERMODE. */
3483 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
3484 enum machine_mode innermode
, unsigned int byte
)
3486 /* We support up to 512-bit values (for V8DFmode). */
3490 value_mask
= (1 << value_bit
) - 1
3492 unsigned char value
[max_bitsize
/ value_bit
];
3501 rtvec result_v
= NULL
;
3502 enum mode_class outer_class
;
3503 enum machine_mode outer_submode
;
3505 /* Some ports misuse CCmode. */
3506 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
3509 /* We have no way to represent a complex constant at the rtl level. */
3510 if (COMPLEX_MODE_P (outermode
))
3513 /* Unpack the value. */
3515 if (GET_CODE (op
) == CONST_VECTOR
)
3517 num_elem
= CONST_VECTOR_NUNITS (op
);
3518 elems
= &CONST_VECTOR_ELT (op
, 0);
3519 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
3525 elem_bitsize
= max_bitsize
;
3527 /* If this asserts, it is too complicated; reducing value_bit may help. */
3528 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
3529 /* I don't know how to handle endianness of sub-units. */
3530 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
3532 for (elem
= 0; elem
< num_elem
; elem
++)
3535 rtx el
= elems
[elem
];
3537 /* Vectors are kept in target memory order. (This is probably
3540 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3541 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3543 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3544 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3545 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3546 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3547 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3550 switch (GET_CODE (el
))
3554 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3556 *vp
++ = INTVAL (el
) >> i
;
3557 /* CONST_INTs are always logically sign-extended. */
3558 for (; i
< elem_bitsize
; i
+= value_bit
)
3559 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
3563 if (GET_MODE (el
) == VOIDmode
)
3565 /* If this triggers, someone should have generated a
3566 CONST_INT instead. */
3567 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
3569 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
3570 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
3571 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
3574 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
3577 /* It shouldn't matter what's done here, so fill it with
3579 for (; i
< max_bitsize
; i
+= value_bit
)
3584 long tmp
[max_bitsize
/ 32];
3585 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
3587 gcc_assert (GET_MODE_CLASS (GET_MODE (el
)) == MODE_FLOAT
);
3588 gcc_assert (bitsize
<= elem_bitsize
);
3589 gcc_assert (bitsize
% value_bit
== 0);
3591 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
3594 /* real_to_target produces its result in words affected by
3595 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3596 and use WORDS_BIG_ENDIAN instead; see the documentation
3597 of SUBREG in rtl.texi. */
3598 for (i
= 0; i
< bitsize
; i
+= value_bit
)
3601 if (WORDS_BIG_ENDIAN
)
3602 ibase
= bitsize
- 1 - i
;
3605 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
3608 /* It shouldn't matter what's done here, so fill it with
3610 for (; i
< elem_bitsize
; i
+= value_bit
)
3620 /* Now, pick the right byte to start with. */
3621 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3622 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3623 will already have offset 0. */
3624 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
3626 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
3628 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3629 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3630 byte
= (subword_byte
% UNITS_PER_WORD
3631 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3634 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3635 so if it's become negative it will instead be very large.) */
3636 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
3638 /* Convert from bytes to chunks of size value_bit. */
3639 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
3641 /* Re-pack the value. */
3643 if (VECTOR_MODE_P (outermode
))
3645 num_elem
= GET_MODE_NUNITS (outermode
);
3646 result_v
= rtvec_alloc (num_elem
);
3647 elems
= &RTVEC_ELT (result_v
, 0);
3648 outer_submode
= GET_MODE_INNER (outermode
);
3654 outer_submode
= outermode
;
3657 outer_class
= GET_MODE_CLASS (outer_submode
);
3658 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
3660 gcc_assert (elem_bitsize
% value_bit
== 0);
3661 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
3663 for (elem
= 0; elem
< num_elem
; elem
++)
3667 /* Vectors are stored in target memory order. (This is probably
3670 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3671 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3673 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3674 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3675 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3676 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3677 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3680 switch (outer_class
)
3683 case MODE_PARTIAL_INT
:
3685 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
3688 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3690 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
3691 for (; i
< elem_bitsize
; i
+= value_bit
)
3692 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
3693 << (i
- HOST_BITS_PER_WIDE_INT
));
3695 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3697 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
3698 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
3700 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
3707 long tmp
[max_bitsize
/ 32];
3709 /* real_from_target wants its input in words affected by
3710 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3711 and use WORDS_BIG_ENDIAN instead; see the documentation
3712 of SUBREG in rtl.texi. */
3713 for (i
= 0; i
< max_bitsize
/ 32; i
++)
3715 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
3718 if (WORDS_BIG_ENDIAN
)
3719 ibase
= elem_bitsize
- 1 - i
;
3722 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
3725 real_from_target (&r
, tmp
, outer_submode
);
3726 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
3734   if (VECTOR_MODE_P (outermode))
3735     return gen_rtx_CONST_VECTOR (outermode, result_v);
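/* Added note (not part of the original source): simplify_immed_subreg works
   by flattening the inner constant into a little-endian array of small
   chunks, choosing the starting chunk from BYTE (after correcting for
   endianness), and re-packing OUTERMODE's worth of chunks.  A stand-alone
   sketch of the unpack/re-pack idea for plain integers, using bytes as the
   chunk size (hypothetical helpers):  */
#if 0
#include <stdint.h>
#include <stddef.h>

static void
unpack_le (uint64_t value, unsigned char *bytes, size_t n)
{
  size_t i;
  for (i = 0; i < n; i++)
    bytes[i] = (unsigned char) (value >> (8 * i));
}

static uint64_t
repack_le (const unsigned char *bytes, size_t byte_offset, size_t n)
{
  uint64_t value = 0;
  size_t i;

  /* Read N bytes starting at BYTE_OFFSET, least-significant byte first.  */
  for (i = 0; i < n; i++)
    value |= (uint64_t) bytes[byte_offset + i] << (8 * i);
  return value;
}
#endif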
3740 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3741 Return 0 if no simplifications are possible. */
3743 simplify_subreg (enum machine_mode outermode
, rtx op
,
3744 enum machine_mode innermode
, unsigned int byte
)
3746 /* Little bit of sanity checking. */
3747 gcc_assert (innermode
!= VOIDmode
);
3748 gcc_assert (outermode
!= VOIDmode
);
3749 gcc_assert (innermode
!= BLKmode
);
3750 gcc_assert (outermode
!= BLKmode
);
3752 gcc_assert (GET_MODE (op
) == innermode
3753 || GET_MODE (op
) == VOIDmode
);
3755 gcc_assert ((byte
% GET_MODE_SIZE (outermode
)) == 0);
3756 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
3758 if (outermode
== innermode
&& !byte
)
3761 if (GET_CODE (op
) == CONST_INT
3762 || GET_CODE (op
) == CONST_DOUBLE
3763 || GET_CODE (op
) == CONST_VECTOR
)
3764 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
3766 /* Changing mode twice with SUBREG => just change it once,
3767 or not at all if changing back op starting mode. */
3768 if (GET_CODE (op
) == SUBREG
)
3770 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
3771 int final_offset
= byte
+ SUBREG_BYTE (op
);
3774 if (outermode
== innermostmode
3775 && byte
== 0 && SUBREG_BYTE (op
) == 0)
3776 return SUBREG_REG (op
);
3778 /* The SUBREG_BYTE represents offset, as if the value were stored
3779 in memory. Irritating exception is paradoxical subreg, where
3780 we define SUBREG_BYTE to be 0. On big endian machines, this
3781 value should be negative. For a moment, undo this exception. */
3782 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
3784 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
3785 if (WORDS_BIG_ENDIAN
)
3786 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3787 if (BYTES_BIG_ENDIAN
)
3788 final_offset
+= difference
% UNITS_PER_WORD
;
3790 if (SUBREG_BYTE (op
) == 0
3791 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
3793 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
3794 if (WORDS_BIG_ENDIAN
)
3795 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3796 if (BYTES_BIG_ENDIAN
)
3797 final_offset
+= difference
% UNITS_PER_WORD
;
3800 /* See whether resulting subreg will be paradoxical. */
3801 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
3803 /* In nonparadoxical subregs we can't handle negative offsets. */
3804 if (final_offset
< 0)
3806 /* Bail out in case resulting subreg would be incorrect. */
3807 if (final_offset
% GET_MODE_SIZE (outermode
)
3808 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
3814 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
3816 /* In paradoxical subreg, see if we are still looking on lower part.
3817 If so, our SUBREG_BYTE will be 0. */
3818 if (WORDS_BIG_ENDIAN
)
3819 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3820 if (BYTES_BIG_ENDIAN
)
3821 offset
+= difference
% UNITS_PER_WORD
;
3822 if (offset
== final_offset
)
3828 /* Recurse for further possible simplifications. */
3829 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
3833 if (validate_subreg (outermode
, innermostmode
,
3834 SUBREG_REG (op
), final_offset
))
3835 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
3839 /* SUBREG of a hard register => just change the register number
3840 and/or mode. If the hard register is not valid in that mode,
3841 suppress this simplification. If the hard register is the stack,
3842 frame, or argument pointer, leave this as a SUBREG. */
3845 && REGNO (op
) < FIRST_PSEUDO_REGISTER
3846 #ifdef CANNOT_CHANGE_MODE_CLASS
3847 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op
), innermode
, outermode
)
3848 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
3849 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
)
3851 && ((reload_completed
&& !frame_pointer_needed
)
3852 || (REGNO (op
) != FRAME_POINTER_REGNUM
3853 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3854 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
3857 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3858 && REGNO (op
) != ARG_POINTER_REGNUM
3860 && REGNO (op
) != STACK_POINTER_REGNUM
3861 && subreg_offset_representable_p (REGNO (op
), innermode
,
3864 unsigned int regno
= REGNO (op
);
3865 unsigned int final_regno
3866 = regno
+ subreg_regno_offset (regno
, innermode
, byte
, outermode
);
3868 /* ??? We do allow it if the current REG is not valid for
3869 its mode. This is a kludge to work around how float/complex
3870 arguments are passed on 32-bit SPARC and should be fixed. */
3871 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
3872 || ! HARD_REGNO_MODE_OK (regno
, innermode
))
3874 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, byte
);
3876 /* Propagate original regno. We don't have any way to specify
3877 the offset inside original regno, so do so only for lowpart.
3878 The information is used only by alias analysis that can not
3879 grog partial register anyway. */
3881 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
3882 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
3887 /* If we have a SUBREG of a register that we are replacing and we are
3888 replacing it with a MEM, make a new MEM and try replacing the
3889 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3890 or if we would be widening it. */
3893 && ! mode_dependent_address_p (XEXP (op
, 0))
3894 /* Allow splitting of volatile memory references in case we don't
3895 have instruction to move the whole thing. */
3896 && (! MEM_VOLATILE_P (op
)
3897 || ! have_insn_for (SET
, innermode
))
3898 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
3899 return adjust_address_nv (op
, outermode
, byte
);
3901 /* Handle complex values represented as CONCAT
3902 of real and imaginary part. */
3903 if (GET_CODE (op
) == CONCAT
)
3905 unsigned int inner_size
, final_offset
;
3908 inner_size
= GET_MODE_UNIT_SIZE (innermode
);
3909 part
= byte
< inner_size
? XEXP (op
, 0) : XEXP (op
, 1);
3910 final_offset
= byte
% inner_size
;
3911 if (final_offset
+ GET_MODE_SIZE (outermode
) > inner_size
)
3914 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
3917 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
3918 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
3922 /* Optimize SUBREG truncations of zero and sign extended values. */
3923 if ((GET_CODE (op
) == ZERO_EXTEND
3924 || GET_CODE (op
) == SIGN_EXTEND
)
3925 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
))
3927 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
3929 /* If we're requesting the lowpart of a zero or sign extension,
3930 there are three possibilities. If the outermode is the same
3931 as the origmode, we can omit both the extension and the subreg.
3932 If the outermode is not larger than the origmode, we can apply
3933 the truncation without the extension. Finally, if the outermode
3934 is larger than the origmode, but both are integer modes, we
3935 can just extend to the appropriate mode. */
3938 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
3939 if (outermode
== origmode
)
3940 return XEXP (op
, 0);
3941 if (GET_MODE_BITSIZE (outermode
) <= GET_MODE_BITSIZE (origmode
))
3942 return simplify_gen_subreg (outermode
, XEXP (op
, 0), origmode
,
3943 subreg_lowpart_offset (outermode
,
3945 if (SCALAR_INT_MODE_P (outermode
))
3946 return simplify_gen_unary (GET_CODE (op
), outermode
,
3947 XEXP (op
, 0), origmode
);
3950 /* A SUBREG resulting from a zero extension may fold to zero if
3951 it extracts higher bits that the ZERO_EXTEND's source bits. */
3952 if (GET_CODE (op
) == ZERO_EXTEND
3953 && bitpos
>= GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0))))
3954 return CONST0_RTX (outermode
);
3957 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
3958 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3959 the outer subreg is effectively a truncation to the original mode. */
3960 if ((GET_CODE (op
) == LSHIFTRT
3961 || GET_CODE (op
) == ASHIFTRT
)
3962 && SCALAR_INT_MODE_P (outermode
)
3963 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
3964 to avoid the possibility that an outer LSHIFTRT shifts by more
3965 than the sign extension's sign_bit_copies and introduces zeros
3966 into the high bits of the result. */
3967 && (2 * GET_MODE_BITSIZE (outermode
)) <= GET_MODE_BITSIZE (innermode
)
3968 && GET_CODE (XEXP (op
, 1)) == CONST_INT
3969 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
3970 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
3971 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
3972 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
3973 return simplify_gen_binary (ASHIFTRT
, outermode
,
3974 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
3976 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
3977 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3978 the outer subreg is effectively a truncation to the original mode. */
3979 if ((GET_CODE (op
) == LSHIFTRT
3980 || GET_CODE (op
) == ASHIFTRT
)
3981 && SCALAR_INT_MODE_P (outermode
)
3982 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
3983 && GET_CODE (XEXP (op
, 1)) == CONST_INT
3984 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
3985 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
3986 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
3987 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
3988 return simplify_gen_binary (LSHIFTRT
, outermode
,
3989 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
3991 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
3992 to (ashift:QI (x:QI) C), where C is a suitable small constant and
3993 the outer subreg is effectively a truncation to the original mode. */
3994   if (GET_CODE (op) == ASHIFT
3995       && SCALAR_INT_MODE_P (outermode)
3996       && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3997       && GET_CODE (XEXP (op, 1)) == CONST_INT
3998       && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3999           || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4000       && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4001       && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4002       && subreg_lsb_1 (outermode, innermode, byte) == 0)
4003     return simplify_gen_binary (ASHIFT, outermode,
4004                                 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
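/* Added note (not part of the original source): the preceding cases exploit
   the fact that a truncating SUBREG of an extension of X, taken at the low
   part, gives back X (or a cheaper shift of X).  In plain C terms, for an
   8-bit X widened to 32 bits and then truncated again (illustrative
   expression only):  */
#if 0
#include <stdint.h>

static uint8_t
truncate_of_zero_extend (uint8_t x)
{
  /* (subreg:QI (zero_extend:SI x:QI) 0) is just x.  */
  return (uint8_t) (uint32_t) x;
}
#endif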
4009 /* Make a SUBREG operation or equivalent if it folds. */
4012 simplify_gen_subreg (enum machine_mode outermode
, rtx op
,
4013 enum machine_mode innermode
, unsigned int byte
)
4017 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
4021 if (GET_CODE (op
) == SUBREG
4022 || GET_CODE (op
) == CONCAT
4023 || GET_MODE (op
) == VOIDmode
)
4026 if (validate_subreg (outermode
, innermode
, op
, byte
))
4027 return gen_rtx_SUBREG (outermode
, op
, byte
);
4032 /* Simplify X, an rtx expression.
4034    Return the simplified expression or NULL if no simplifications
4037    This is the preferred entry point into the simplification routines;
4038    however, we still allow passes to call the more specific routines.
4040    Right now GCC has three (yes, three) major bodies of RTL simplification
4041    code that need to be unified.
4043         1. fold_rtx in cse.c.  This code uses various CSE specific
4044            information to aid in RTL simplification.
4046         2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
4047            it uses combine specific information to aid in RTL
4050         3. The routines in this file.
4053    Long term we want to only have one body of simplification code; to
4054    get to that state I recommend the following steps:
4056         1. Pore over fold_rtx & simplify_rtx and move any simplifications
4057            which are not pass dependent state into these routines.
4059         2. As code is moved by #1, change fold_rtx & simplify_rtx to
4060            use this routine whenever possible.
4062         3. Allow for pass dependent state to be provided to these
4063            routines and add simplifications based on the pass dependent
4064            state.  Remove code from cse.c & combine.c that becomes
4067    It will take time, but ultimately the compiler will be easier to
4068    maintain and improve.  It's totally silly that when we add a
4069    simplification it needs to be added to 4 places (3 for RTL
4070    simplification and 1 for tree simplification).  */
4073 simplify_rtx (rtx x
)
4075 enum rtx_code code
= GET_CODE (x
);
4076 enum machine_mode mode
= GET_MODE (x
);
4078 switch (GET_RTX_CLASS (code
))
4081 return simplify_unary_operation (code
, mode
,
4082 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
4083 case RTX_COMM_ARITH
:
4084 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
4085 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
4087 /* Fall through.... */
4090 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
4093 case RTX_BITFIELD_OPS
:
4094 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
4095 XEXP (x
, 0), XEXP (x
, 1),
4099 case RTX_COMM_COMPARE
:
4100 return simplify_relational_operation (code
, mode
,
4101 ((GET_MODE (XEXP (x
, 0))
4103 ? GET_MODE (XEXP (x
, 0))
4104 : GET_MODE (XEXP (x
, 1))),
4110 return simplify_gen_subreg (mode
, SUBREG_REG (x
),
4111 GET_MODE (SUBREG_REG (x
)),
4118 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4119 if (GET_CODE (XEXP (x
, 0)) == HIGH
4120 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))