/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
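
/* Illustrative sketch (not part of the original file): how the macro is
   typically used when widening a single HOST_WIDE_INT into a (low, high)
   pair.  The helper name and variables below are hypothetical.  */
#if 0
static void
example_hwi_sign_extend (HOST_WIDE_INT value)
{
  unsigned HOST_WIDE_INT low = value;            /* low word, unsigned */
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (value);  /* all-ones if value < 0 */
  /* For value == -5, (low, high) is (~(unsigned HOST_WIDE_INT) 4, -1);
     for value == 7, it is (7, 0).  */
}
#endif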
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
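
/* Illustrative sketch (hypothetical helper, not in the original file):
   negating the most negative value wraps back to itself after the
   gen_int_mode truncation, e.g. -(-128) stays -128 in QImode.  */
#if 0
static void
example_neg_const_int (void)
{
  rtx minus_128 = gen_int_mode (-128, QImode);
  rtx negated = neg_const_int (QImode, minus_128);
  /* INTVAL (negated) is again -128, because 128 does not fit in QImode.  */
}
#endif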
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
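
/* Illustrative sketch (hypothetical, not in the original file): for SImode
   the sign bit is 0x80000000, so only that exact constant satisfies the
   predicate.  */
#if 0
static void
example_mode_signbit_p (void)
{
  rtx signbit = gen_int_mode ((HOST_WIDE_INT) 1 << 31, SImode);
  gcc_assert (mode_signbit_p (SImode, signbit));
  gcc_assert (!mode_signbit_p (SImode, const1_rtx));
}
#endif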
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
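
/* Illustrative sketch (hypothetical, not in the original file): callers that
   build RTL usually go through simplify_gen_binary rather than gen_rtx_PLUS
   so trivial folds happen immediately.  Assumes REG has an integer mode.  */
#if 0
static rtx
example_simplify_gen_binary (rtx reg)
{
  /* (plus (reg) (const_int 0)) folds straight back to (reg).  */
  return simplify_gen_binary (PLUS, GET_MODE (reg), reg, const0_rtx);
}
#endif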
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
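
/* Illustrative sketch (hypothetical, not in the original file): callers use
   avoid_constant_pool_reference to look through a constant-pool MEM before
   attempting constant folding.  */
#if 0
static rtx
example_fold_pool_operand (rtx op)
{
  rtx trueop = avoid_constant_pool_reference (op);
  if (constant_pool_reference_p (op))
    /* TRUEOP is now the constant stored in the pool, not the MEM.  */
    return trueop;
  return op;
}
#endif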
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
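
/* Illustrative sketch (hypothetical, not in the original file): building a
   comparison through the helper lets obvious cases fold.  */
#if 0
static rtx
example_simplify_gen_relational (rtx reg)
{
  /* Comparing a side-effect-free integer register with itself typically
     folds to the constant "true" value rather than building an EQ rtx.  */
  return simplify_gen_relational (EQ, SImode, GET_MODE (reg), reg, reg);
}
#endif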
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
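
/* Illustrative sketch (hypothetical, not in the original file): substituting
   a known constant for a register and letting the expression re-fold.  */
#if 0
static rtx
example_simplify_replace_rtx (rtx expr, rtx reg)
{
  /* With EXPR == (plus (reg) (const_int 4)), replacing REG by 0 yields
     (const_int 4) directly, because the rebuilt PLUS folds.  */
  return simplify_replace_rtx (expr, reg, const0_rtx);
}
#endif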
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 be worth the trouble.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == -1
	  && COMPARISON_P (op)
	  && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a CONST_INT).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE (XEXP (op, 0)) == mode)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
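
/* Illustrative sketch (hypothetical, not in the original file): the NOT case
   above rewrites (not (plus X -1)) as (neg X).  */
#if 0
static rtx
example_not_plus_minus_one (rtx x_reg)
{
  rtx plus = simplify_gen_binary (PLUS, SImode, x_reg, constm1_rtx);
  /* Folds to (neg:SI (reg X)).  */
  return simplify_gen_unary (NOT, SImode, plus, SImode);
}
#endif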
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
			(GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

	  val = (arg0 >= 0 ? arg0 : - arg0);

	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;

	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;

	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);

	  arg0 &= GET_MODE_MASK (mode);
	  while (arg0)
	    val++, arg0 &= arg0 - 1;

	  arg0 &= GET_MODE_MASK (mode);
	  while (arg0)
	    val++, arg0 &= arg0 - 1;

	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));

	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

	  neg_double (l1, h1, &lv, &hv);

	  neg_double (l1, h1, &lv, &hv);

	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	  else
	    lv = exact_log2 (l1 & -l1) + 1;

	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);

	    lv = exact_log2 (l1 & -l1);
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);

	  /* This is just a change-of-mode, so do nothing.  */

	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    ;
	  else
	    lv = l1 & GET_MODE_MASK (op_mode);

	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    ;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);

	  d = REAL_VALUE_ABS (d);

	  d = REAL_VALUE_NEGATE (d);

	  d = real_value_truncate (mode, d);

	  /* All this does is change the mode.  */

	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	    real_from_target (&d, tmp, mode);

      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);

	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	    }
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	    }
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))

	  REAL_VALUE_TO_INT (&xl, &xh, x);

	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	    }
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))

	  REAL_VALUE_TO_INT (&xl, &xh, x);

      return immed_double_const (xl, xh, mode);
    }
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
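
/* Illustrative sketch (hypothetical, not in the original file): the helper
   above tends to re-associate (plus (plus X 4) Y) towards (plus (plus X Y) 4)
   so the constant ends up outermost.  */
#if 0
static rtx
example_reassociate (rtx x_reg, rtx y_reg)
{
  rtx inner = simplify_gen_binary (PLUS, SImode, x_reg, GEN_INT (4));
  return simplify_gen_binary (PLUS, SImode, inner, y_reg);
}
#endif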
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	  && GET_CODE (op1) == CONST_INT)
	return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
	       && GET_CODE (op0) == CONST_INT)
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
	  unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      coeff1l = INTVAL (XEXP (rhs, 1));
	      coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      unsigned HOST_WIDE_INT l;

	      add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : orig;
	    }
	}

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	}

      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	 using cc0, in which case we want to leave it as a COMPARE
	 so we can distinguish it from a register-register-copy.

	 In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
	    return xop00;
	}

      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
	  unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      negcoeff1l = -INTVAL (XEXP (rhs, 1));
	      negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      unsigned HOST_WIDE_INT l;

	      add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : orig;
	    }
	}

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (GET_CODE (op1) == CONST_INT
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      else if (GET_CODE (trueop1) == CONST_DOUBLE
	       && (GET_MODE (trueop1) == VOIDmode
		   || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	       && GET_MODE (op0) == mode
	       && CONST_DOUBLE_LOW (trueop1) == 0
	       && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	}

      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;
      tem = simplify_associative_operation (code, mode, op0, op1);

      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      tem = simplify_associative_operation (code, mode, op0, op1);

      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      /* If we are turning off bits already known off in OP0, we need
	 do nothing.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
	return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & INTVAL (trueop1)) == 0)
	{
	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ~INTVAL (trueop1)
	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
			 == INTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);

		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
      tem = simplify_associative_operation (code, mode, op0, op1);

      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	}
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));

      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -funsafe-math-optimizations.  */
	      if (flag_unsafe_math_optimizations
		  && !REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode))
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	    }
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && exact_log2 (INTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    GEN_INT (INTVAL (op1) - 1));
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}

      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

      /* Fall through....  */

      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;

      if (width <= HOST_BITS_PER_WIDE_INT
	  && GET_CODE (trueop1) == CONST_INT
	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);

      if (width <= HOST_BITS_PER_WIDE_INT
	  && GET_CODE (trueop1) == CONST_INT
	  && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
	      == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);

      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);

      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);

      /* ??? There are simplifications that can be done.  */

      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (GET_CODE (x) == CONST_INT);
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}

	{
	  enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					? GET_MODE (trueop0)
					: GET_MODE_INNER (mode));
	  enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					? GET_MODE (trueop1)
					: GET_MODE_INNER (mode));

	  gcc_assert (VECTOR_MODE_P (mode));
	  gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		      == GET_MODE_SIZE (mode));

	  if (VECTOR_MODE_P (op0_mode))
	    gcc_assert (GET_MODE_INNER (mode)
			== GET_MODE_INNER (op0_mode));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	  if (VECTOR_MODE_P (op1_mode))
	    gcc_assert (GET_MODE_INNER (mode)
			== GET_MODE_INNER (op1_mode));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	  if ((GET_CODE (trueop0) == CONST_VECTOR
	       || GET_CODE (trueop0) == CONST_INT
	       || GET_CODE (trueop0) == CONST_DOUBLE)
	      && (GET_CODE (trueop1) == CONST_VECTOR
		  || GET_CODE (trueop1) == CONST_INT
		  || GET_CODE (trueop1) == CONST_DOUBLE))
	    {
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;
	      unsigned in_n_elts = 1;

	      if (VECTOR_MODE_P (op0_mode))
		in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	      for (i = 0; i < n_elts; i++)
		{
		  if (!VECTOR_MODE_P (op0_mode))
		    RTVEC_ELT (v, i) = trueop0;
		  else
		    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);

		  if (!VECTOR_MODE_P (op1_mode))
		    RTVEC_ELT (v, i) = trueop1;
		  else
		    RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							 i - in_n_elts);
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
2069 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2072 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2074 unsigned int width
= GET_MODE_BITSIZE (mode
);
2076 if (VECTOR_MODE_P (mode
)
2077 && code
!= VEC_CONCAT
2078 && GET_CODE (op0
) == CONST_VECTOR
2079 && GET_CODE (op1
) == CONST_VECTOR
)
2081 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2082 enum machine_mode op0mode
= GET_MODE (op0
);
2083 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2084 enum machine_mode op1mode
= GET_MODE (op1
);
2085 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2086 rtvec v
= rtvec_alloc (n_elts
);
2089 gcc_assert (op0_n_elts
== n_elts
);
2090 gcc_assert (op1_n_elts
== n_elts
);
2091 for (i
= 0; i
< n_elts
; i
++)
2093 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2094 CONST_VECTOR_ELT (op0
, i
),
2095 CONST_VECTOR_ELT (op1
, i
));
2098 RTVEC_ELT (v
, i
) = x
;
2101 return gen_rtx_CONST_VECTOR (mode
, v
);
2104 if (VECTOR_MODE_P (mode
)
2105 && code
== VEC_CONCAT
2106 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2108 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2109 rtvec v
= rtvec_alloc (n_elts
);
2111 gcc_assert (n_elts
>= 2);
2114 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2115 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2117 RTVEC_ELT (v
, 0) = op0
;
2118 RTVEC_ELT (v
, 1) = op1
;
2122 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2123 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2126 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2127 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2128 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2130 for (i
= 0; i
< op0_n_elts
; ++i
)
2131 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2132 for (i
= 0; i
< op1_n_elts
; ++i
)
2133 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2136 return gen_rtx_CONST_VECTOR (mode
, v
);
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */
	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */
	  if ((flag_rounding_math
	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
		   && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
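  /* Concretely: with -ftrapping-math in effect, adding DBL_MAX to DBL_MAX
     is not folded here, because the finite operands would fold to +Inf and
     the run-time code is expected to raise the overflow exception instead.  */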
2261 /* We can fold some multi-word operations. */
2262 if (GET_MODE_CLASS (mode
) == MODE_INT
2263 && width
== HOST_BITS_PER_WIDE_INT
* 2
2264 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
2265 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
2267 unsigned HOST_WIDE_INT l1
, l2
, lv
, lt
;
2268 HOST_WIDE_INT h1
, h2
, hv
, ht
;
2270 if (GET_CODE (op0
) == CONST_DOUBLE
)
2271 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
2273 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
2275 if (GET_CODE (op1
) == CONST_DOUBLE
)
2276 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
2278 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
2283 /* A - B == A + (-B). */
2284 neg_double (l2
, h2
, &lv
, &hv
);
2287 /* Fall through.... */
2290 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2294 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
2298 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2299 &lv
, &hv
, <
, &ht
))
2304 if (div_and_round_double (TRUNC_DIV_EXPR
, 0, l1
, h1
, l2
, h2
,
2305 <
, &ht
, &lv
, &hv
))
2310 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2311 &lv
, &hv
, <
, &ht
))
2316 if (div_and_round_double (TRUNC_DIV_EXPR
, 1, l1
, h1
, l2
, h2
,
2317 <
, &ht
, &lv
, &hv
))
2322 lv
= l1
& l2
, hv
= h1
& h2
;
2326 lv
= l1
| l2
, hv
= h1
| h2
;
2330 lv
= l1
^ l2
, hv
= h1
^ h2
;
2336 && ((unsigned HOST_WIDE_INT
) l1
2337 < (unsigned HOST_WIDE_INT
) l2
)))
2346 && ((unsigned HOST_WIDE_INT
) l1
2347 > (unsigned HOST_WIDE_INT
) l2
)))
2354 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
2356 && ((unsigned HOST_WIDE_INT
) l1
2357 < (unsigned HOST_WIDE_INT
) l2
)))
2364 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
2366 && ((unsigned HOST_WIDE_INT
) l1
2367 > (unsigned HOST_WIDE_INT
) l2
)))
2373 case LSHIFTRT
: case ASHIFTRT
:
2375 case ROTATE
: case ROTATERT
:
2376 if (SHIFT_COUNT_TRUNCATED
)
2377 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
2379 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
2382 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
2383 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
2385 else if (code
== ASHIFT
)
2386 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
2387 else if (code
== ROTATE
)
2388 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
2389 else /* code == ROTATERT */
2390 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
2397 return immed_double_const (lv
, hv
, mode
);
  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
	  arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

	  arg0s = arg0;
	  if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg0s |= ((HOST_WIDE_INT) (-1) << width);

	  arg1s = arg1;
	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}
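      /* For example, with width == 8 and op0 == (const_int 255), the
	 zero-extended value is arg0 == 255 while the sign-extended value
	 is arg0s == -1; the signed cases below use arg0s/arg1s and the
	 unsigned cases use arg0/arg1.  */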
2428 /* Compute the value of the arithmetic. */
2433 val
= arg0s
+ arg1s
;
2437 val
= arg0s
- arg1s
;
2441 val
= arg0s
* arg1s
;
2446 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2449 val
= arg0s
/ arg1s
;
2454 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2457 val
= arg0s
% arg1s
;
2462 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2465 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
2470 || (arg0s
== (HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
2473 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2517 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
2518 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
2526 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
2527 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
2531 /* Do nothing here. */
2535 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
2539 val
= ((unsigned HOST_WIDE_INT
) arg0
2540 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2544 val
= arg0s
> arg1s
? arg0s
: arg1s
;
2548 val
= ((unsigned HOST_WIDE_INT
) arg0
2549 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
2556 /* ??? There are simplifications that can be done. */
2563 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
  short ix;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;
  int result;

  result = (commutative_operand_precedence (d2->op)
	    - commutative_operand_precedence (d1->op));
  if (result)
    return result;
  return d1->ix - d2->ix;
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int first, changed, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
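  /* For example, given (plus (minus a b) (plus b c)) the loop below
     flattens the operands into ops[] = { a, -b, b, c }; the pairwise
     simplification pass then cancels -b against b, and the expression
     is rebuilt as (plus a c).  */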
2624 for (i
= 0; i
< n_ops
; i
++)
2626 rtx this_op
= ops
[i
].op
;
2627 int this_neg
= ops
[i
].neg
;
2628 enum rtx_code this_code
= GET_CODE (this_op
);
2637 ops
[n_ops
].op
= XEXP (this_op
, 1);
2638 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
2641 ops
[i
].op
= XEXP (this_op
, 0);
2644 canonicalized
|= this_neg
;
2648 ops
[i
].op
= XEXP (this_op
, 0);
2649 ops
[i
].neg
= ! this_neg
;
2656 && GET_CODE (XEXP (this_op
, 0)) == PLUS
2657 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
2658 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
2660 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
2661 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
2662 ops
[n_ops
].neg
= this_neg
;
2670 /* ~a -> (-a - 1) */
2673 ops
[n_ops
].op
= constm1_rtx
;
2674 ops
[n_ops
++].neg
= this_neg
;
2675 ops
[i
].op
= XEXP (this_op
, 0);
2676 ops
[i
].neg
= !this_neg
;
2685 ops
[i
].op
= neg_const_int (mode
, this_op
);
2699 gcc_assert (n_ops
>= 2);
2702 int n_constants
= 0;
2704 for (i
= 0; i
< n_ops
; i
++)
2705 if (GET_CODE (ops
[i
].op
) == CONST_INT
)
2708 if (n_constants
<= 1)
2712 /* If we only have two operands, we can avoid the loops. */
2715 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
2718 /* Get the two operands. Be careful with the order, especially for
2719 the cases where code == MINUS. */
2720 if (ops
[0].neg
&& ops
[1].neg
)
2722 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
2725 else if (ops
[0].neg
)
2736 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
2739 /* Now simplify each pair of operands until nothing changes. The first
2740 time through just simplify constants against each other. */
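  /* For example, for (plus (plus x (const_int 4)) (const_int 8)) the
     first pass only folds the two constants against each other, giving
     ops[] = { x, 12 }, so constants are combined before any operand
     reordering takes place.  */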
2747 for (i
= 0; i
< n_ops
- 1; i
++)
2748 for (j
= i
+ 1; j
< n_ops
; j
++)
2750 rtx lhs
= ops
[i
].op
, rhs
= ops
[j
].op
;
2751 int lneg
= ops
[i
].neg
, rneg
= ops
[j
].neg
;
2753 if (lhs
!= 0 && rhs
!= 0
2754 && (! first
|| (CONSTANT_P (lhs
) && CONSTANT_P (rhs
))))
2756 enum rtx_code ncode
= PLUS
;
2762 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2764 else if (swap_commutative_operands_p (lhs
, rhs
))
2765 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
2767 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
2769 /* Reject "simplifications" that just wrap the two
2770 arguments in a CONST. Failure to do so can result
2771 in infinite recursion with simplify_binary_operation
2772 when it calls us to simplify CONST operations. */
2774 && ! (GET_CODE (tem
) == CONST
2775 && GET_CODE (XEXP (tem
, 0)) == ncode
2776 && XEXP (XEXP (tem
, 0), 0) == lhs
2777 && XEXP (XEXP (tem
, 0), 1) == rhs
)
2778 /* Don't allow -x + -1 -> ~x simplifications in the
2779 first pass. This allows us the chance to combine
2780 the -1 with other constants. */
2782 && GET_CODE (tem
) == NOT
2783 && XEXP (tem
, 0) == rhs
))
2786 if (GET_CODE (tem
) == NEG
)
2787 tem
= XEXP (tem
, 0), lneg
= !lneg
;
2788 if (GET_CODE (tem
) == CONST_INT
&& lneg
)
2789 tem
= neg_const_int (mode
, tem
), lneg
= 0;
2793 ops
[j
].op
= NULL_RTX
;
2803 /* Pack all the operands to the lower-numbered entries. */
2804 for (i
= 0, j
= 0; j
< n_ops
; j
++)
2808 /* Stabilize sort. */
2814 /* Sort the operations based on swap_commutative_operands_p. */
2815 qsort (ops
, n_ops
, sizeof (*ops
), simplify_plus_minus_op_data_cmp
);
2817 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2819 && GET_CODE (ops
[1].op
) == CONST_INT
2820 && CONSTANT_P (ops
[0].op
)
2822 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
2824 /* We suppressed creation of trivial CONST expressions in the
2825 combination loop to avoid recursion. Create one manually now.
2826 The combination loop should have ensured that there is exactly
2827 one CONST_INT, and the sort will have ensured that it is last
2828 in the array and that any other constant will be next-to-last. */
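  /* For example, if the sorted array ends with { ..., (symbol_ref s),
     (const_int 8) }, the two constants are combined here into the single
     operand (const (plus (symbol_ref s) (const_int 8))).  */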
2831 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
2832 && CONSTANT_P (ops
[n_ops
- 2].op
))
2834 rtx value
= ops
[n_ops
- 1].op
;
2835 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
2836 value
= neg_const_int (mode
, value
);
2837 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
2841 /* Put a non-negated operand first, if possible. */
2843 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
2846 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
2855 /* Now make the result by performing the requested operations. */
2857 for (i
= 1; i
< n_ops
; i
++)
2858 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
2859 mode
, result
, ops
[i
].op
);
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));

  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
	{
	  /* If op0 is a comparison, extract the comparison arguments
	     from it.  */
	  if (code == NE)
	    {
	      if (GET_MODE (op0) == mode)
		return simplify_rtx (op0);
	      else
		return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	  else if (code == EQ)
	    {
	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	      if (new_code != UNKNOWN)
		return simplify_gen_relational (new_code, mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
			       cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);
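  /* For example, (eq:SI (plus:SI x (const_int 4)) (const_int 10)) is
     rewritten by the PLUS/MINUS case above into (eq:SI x (const_int 6)).  */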
3030 /* Check if the given comparison (done in the given MODE) is actually a
3031 tautology or a contradiction.
3032 If no simplification is possible, this function returns zero.
3033 Otherwise, it returns either const_true_rtx or const0_rtx. */
3036 simplify_const_relational_operation (enum rtx_code code
,
3037 enum machine_mode mode
,
3040 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
3045 gcc_assert (mode
!= VOIDmode
3046 || (GET_MODE (op0
) == VOIDmode
3047 && GET_MODE (op1
) == VOIDmode
));
3049 /* If op0 is a compare, extract the comparison arguments from it. */
3050 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
3052 op1
= XEXP (op0
, 1);
3053 op0
= XEXP (op0
, 0);
3055 if (GET_MODE (op0
) != VOIDmode
)
3056 mode
= GET_MODE (op0
);
3057 else if (GET_MODE (op1
) != VOIDmode
)
3058 mode
= GET_MODE (op1
);
3063 /* We can't simplify MODE_CC values since we don't know what the
3064 actual comparison is. */
3065 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
3068 /* Make sure the constant is second. */
3069 if (swap_commutative_operands_p (op0
, op1
))
3071 tem
= op0
, op0
= op1
, op1
= tem
;
3072 code
= swap_condition (code
);
3075 trueop0
= avoid_constant_pool_reference (op0
);
3076 trueop1
= avoid_constant_pool_reference (op1
);
3078 /* For integer comparisons of A and B maybe we can simplify A - B and can
3079 then simplify a comparison of that with zero. If A and B are both either
3080 a register or a CONST_INT, this can't help; testing for these cases will
3081 prevent infinite recursion here and speed things up.
3083 If CODE is an unsigned comparison, then we can never do this optimization,
3084 because it gives an incorrect result if the subtraction wraps around zero.
3085 ANSI C defines unsigned operations such that they never overflow, and
3086 thus such cases can not be ignored; but we cannot do it even for
3087 signed comparisons for languages such as Java, so test flag_wrapv. */
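  /* For example, with 32-bit operands 0 LTU 1 is true, but folding it as
     (0 - 1) LTU 0 would compare 0xffffffff against zero and yield false,
     which is why the unsigned codes are excluded below.  */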
3089 if (!flag_wrapv
&& INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
3090 && ! ((REG_P (op0
) || GET_CODE (trueop0
) == CONST_INT
)
3091 && (REG_P (op1
) || GET_CODE (trueop1
) == CONST_INT
))
3092 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
3093 /* We cannot do this for == or != if tem is a nonzero address. */
3094 && ((code
!= EQ
&& code
!= NE
) || ! nonzero_address_p (tem
))
3095 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
3096 return simplify_const_relational_operation (signed_condition (code
),
3097 mode
, tem
, const0_rtx
);
3099 if (flag_unsafe_math_optimizations
&& code
== ORDERED
)
3100 return const_true_rtx
;
3102 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
3105 /* For modes without NaNs, if the two operands are equal, we know the
3106 result except if they have side-effects. */
3107 if (! HONOR_NANS (GET_MODE (trueop0
))
3108 && rtx_equal_p (trueop0
, trueop1
)
3109 && ! side_effects_p (trueop0
))
3110 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3112 /* If the operands are floating-point constants, see if we can fold
3114 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3115 && GET_CODE (trueop1
) == CONST_DOUBLE
3116 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
3118 REAL_VALUE_TYPE d0
, d1
;
3120 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3121 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3123 /* Comparisons are unordered iff at least one of the values is NaN. */
3124 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3134 return const_true_rtx
;
3147 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3148 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3149 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3152 /* Otherwise, see if the operands are both integers. */
3153 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3154 && (GET_CODE (trueop0
) == CONST_DOUBLE
3155 || GET_CODE (trueop0
) == CONST_INT
)
3156 && (GET_CODE (trueop1
) == CONST_DOUBLE
3157 || GET_CODE (trueop1
) == CONST_INT
))
3159 int width
= GET_MODE_BITSIZE (mode
);
3160 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3161 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3163 /* Get the two words comprising each integer constant. */
3164 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3166 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3167 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3171 l0u
= l0s
= INTVAL (trueop0
);
3172 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3175 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3177 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3178 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3182 l1u
= l1s
= INTVAL (trueop1
);
3183 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
3186 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3187 we have to sign or zero-extend the values. */
3188 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
3190 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3191 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3193 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3194 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3196 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3197 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3199 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
3200 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
3202 equal
= (h0u
== h1u
&& l0u
== l1u
);
3203 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
3204 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
3205 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
3206 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
3209 /* Otherwise, there are some code-specific tests we can make. */
3212 /* Optimize comparisons with upper and lower bounds. */
3213 if (SCALAR_INT_MODE_P (mode
)
3214 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3227 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3234 /* x >= min is always true. */
3235 if (rtx_equal_p (trueop1
, mmin
))
3236 tem
= const_true_rtx
;
3242 /* x <= max is always true. */
3243 if (rtx_equal_p (trueop1
, mmax
))
3244 tem
= const_true_rtx
;
3249 /* x > max is always false. */
3250 if (rtx_equal_p (trueop1
, mmax
))
3256 /* x < min is always false. */
3257 if (rtx_equal_p (trueop1
, mmin
))
3264 if (tem
== const0_rtx
3265 || tem
== const_true_rtx
)
3272 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3277 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3278 return const_true_rtx
;
3282 /* Optimize abs(x) < 0.0. */
3283 if (trueop1
== CONST0_RTX (mode
)
3284 && !HONOR_SNANS (mode
)
3285 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3287 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3289 if (GET_CODE (tem
) == ABS
)
3295 /* Optimize abs(x) >= 0.0. */
3296 if (trueop1
== CONST0_RTX (mode
)
3297 && !HONOR_NANS (mode
)
3298 && !(flag_wrapv
&& INTEGRAL_MODE_P (mode
)))
3300 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3302 if (GET_CODE (tem
) == ABS
)
3303 return const_true_rtx
;
3308 /* Optimize ! (abs(x) < 0.0). */
3309 if (trueop1
== CONST0_RTX (mode
))
3311 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3313 if (GET_CODE (tem
) == ABS
)
3314 return const_true_rtx
;
3325 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3331 return equal
? const_true_rtx
: const0_rtx
;
3334 return ! equal
? const_true_rtx
: const0_rtx
;
3337 return op0lt
? const_true_rtx
: const0_rtx
;
3340 return op1lt
? const_true_rtx
: const0_rtx
;
3342 return op0ltu
? const_true_rtx
: const0_rtx
;
3344 return op1ltu
? const_true_rtx
: const0_rtx
;
3347 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
3350 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
3352 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
3354 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
3356 return const_true_rtx
;
3364 /* Simplify CODE, an operation with result mode MODE and three operands,
3365 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3366 a constant. Return 0 if no simplifications is possible. */
3369 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
3370 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
3373 unsigned int width
= GET_MODE_BITSIZE (mode
);
3375 /* VOIDmode means "infinite" precision. */
3377 width
= HOST_BITS_PER_WIDE_INT
;
3383 if (GET_CODE (op0
) == CONST_INT
3384 && GET_CODE (op1
) == CONST_INT
3385 && GET_CODE (op2
) == CONST_INT
3386 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
3387 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
3389 /* Extracting a bit-field from a constant */
3390 HOST_WIDE_INT val
= INTVAL (op0
);
3392 if (BITS_BIG_ENDIAN
)
3393 val
>>= (GET_MODE_BITSIZE (op0_mode
)
3394 - INTVAL (op2
) - INTVAL (op1
));
3396 val
>>= INTVAL (op2
);
3398 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
3400 /* First zero-extend. */
3401 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
3402 /* If desired, propagate sign bit. */
3403 if (code
== SIGN_EXTRACT
3404 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
3405 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
3408 /* Clear the bits that don't belong in our mode,
3409 unless they and our sign bit are all one.
3410 So we get either a reasonable negative value or a reasonable
3411 unsigned value for this mode. */
3412 if (width
< HOST_BITS_PER_WIDE_INT
3413 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
3414 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
3415 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3417 return gen_int_mode (val
, mode
);
3422 if (GET_CODE (op0
) == CONST_INT
)
3423 return op0
!= const0_rtx
? op1
: op2
;
3425 /* Convert c ? a : a into "a". */
3426 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
3429 /* Convert a != b ? a : b into "a". */
3430 if (GET_CODE (op0
) == NE
3431 && ! side_effects_p (op0
)
3432 && ! HONOR_NANS (mode
)
3433 && ! HONOR_SIGNED_ZEROS (mode
)
3434 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3435 && rtx_equal_p (XEXP (op0
, 1), op2
))
3436 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3437 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3440 /* Convert a == b ? a : b into "b". */
3441 if (GET_CODE (op0
) == EQ
3442 && ! side_effects_p (op0
)
3443 && ! HONOR_NANS (mode
)
3444 && ! HONOR_SIGNED_ZEROS (mode
)
3445 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3446 && rtx_equal_p (XEXP (op0
, 1), op2
))
3447 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3448 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3451 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
3453 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
3454 ? GET_MODE (XEXP (op0
, 1))
3455 : GET_MODE (XEXP (op0
, 0)));
3458 /* Look for happy constants in op1 and op2. */
3459 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
3461 HOST_WIDE_INT t
= INTVAL (op1
);
3462 HOST_WIDE_INT f
= INTVAL (op2
);
3464 if (t
== STORE_FLAG_VALUE
&& f
== 0)
3465 code
= GET_CODE (op0
);
3466 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
3469 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
3477 return simplify_gen_relational (code
, mode
, cmp_mode
,
3478 XEXP (op0
, 0), XEXP (op0
, 1));
3481 if (cmp_mode
== VOIDmode
)
3482 cmp_mode
= op0_mode
;
3483 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
3484 cmp_mode
, XEXP (op0
, 0),
3487 /* See if any simplifications were possible. */
3490 if (GET_CODE (temp
) == CONST_INT
)
3491 return temp
== const0_rtx
? op2
: op1
;
3493 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
3499 gcc_assert (GET_MODE (op0
) == mode
);
3500 gcc_assert (GET_MODE (op1
) == mode
);
3501 gcc_assert (VECTOR_MODE_P (mode
));
3502 op2
= avoid_constant_pool_reference (op2
);
3503 if (GET_CODE (op2
) == CONST_INT
)
3505 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3506 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3507 int mask
= (1 << n_elts
) - 1;
3509 if (!(INTVAL (op2
) & mask
))
3511 if ((INTVAL (op2
) & mask
) == mask
)
3514 op0
= avoid_constant_pool_reference (op0
);
3515 op1
= avoid_constant_pool_reference (op1
);
3516 if (GET_CODE (op0
) == CONST_VECTOR
3517 && GET_CODE (op1
) == CONST_VECTOR
)
3519 rtvec v
= rtvec_alloc (n_elts
);
3522 for (i
= 0; i
< n_elts
; i
++)
3523 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
3524 ? CONST_VECTOR_ELT (op0
, i
)
3525 : CONST_VECTOR_ELT (op1
, i
));
3526 return gen_rtx_CONST_VECTOR (mode
, v
);
3538 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3539 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3541 Works by unpacking OP into a collection of 8-bit values
3542 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3543 and then repacking them again for OUTERMODE. */
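/* For example (on a little-endian target), a request for
   (subreg:SI (const_double:DI ...) 0) unpacks the DImode constant into
   eight bytes, selects the four starting at BYTE, and repacks them as an
   SImode CONST_INT.  */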
3546 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
3547 enum machine_mode innermode
, unsigned int byte
)
3549 /* We support up to 512-bit values (for V8DFmode). */
3553 value_mask
= (1 << value_bit
) - 1
3555 unsigned char value
[max_bitsize
/ value_bit
];
3564 rtvec result_v
= NULL
;
3565 enum mode_class outer_class
;
3566 enum machine_mode outer_submode
;
3568 /* Some ports misuse CCmode. */
3569 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
3572 /* We have no way to represent a complex constant at the rtl level. */
3573 if (COMPLEX_MODE_P (outermode
))
3576 /* Unpack the value. */
3578 if (GET_CODE (op
) == CONST_VECTOR
)
3580 num_elem
= CONST_VECTOR_NUNITS (op
);
3581 elems
= &CONST_VECTOR_ELT (op
, 0);
3582 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
3588 elem_bitsize
= max_bitsize
;
3590 /* If this asserts, it is too complicated; reducing value_bit may help. */
3591 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
3592 /* I don't know how to handle endianness of sub-units. */
3593 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
3595 for (elem
= 0; elem
< num_elem
; elem
++)
3598 rtx el
= elems
[elem
];
3600 /* Vectors are kept in target memory order. (This is probably
3603 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3604 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3606 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3607 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3608 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3609 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3610 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3613 switch (GET_CODE (el
))
3617 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3619 *vp
++ = INTVAL (el
) >> i
;
3620 /* CONST_INTs are always logically sign-extended. */
3621 for (; i
< elem_bitsize
; i
+= value_bit
)
3622 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
3626 if (GET_MODE (el
) == VOIDmode
)
3628 /* If this triggers, someone should have generated a
3629 CONST_INT instead. */
3630 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
3632 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
3633 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
3634 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
3637 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
3640 /* It shouldn't matter what's done here, so fill it with
3642 for (; i
< elem_bitsize
; i
+= value_bit
)
3647 long tmp
[max_bitsize
/ 32];
3648 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
3650 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
3651 gcc_assert (bitsize
<= elem_bitsize
);
3652 gcc_assert (bitsize
% value_bit
== 0);
3654 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
3657 /* real_to_target produces its result in words affected by
3658 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3659 and use WORDS_BIG_ENDIAN instead; see the documentation
3660 of SUBREG in rtl.texi. */
3661 for (i
= 0; i
< bitsize
; i
+= value_bit
)
3664 if (WORDS_BIG_ENDIAN
)
3665 ibase
= bitsize
- 1 - i
;
3668 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
3671 /* It shouldn't matter what's done here, so fill it with
3673 for (; i
< elem_bitsize
; i
+= value_bit
)
3683 /* Now, pick the right byte to start with. */
3684 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3685 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3686 will already have offset 0. */
3687 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
3689 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
3691 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3692 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3693 byte
= (subword_byte
% UNITS_PER_WORD
3694 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3697 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3698 so if it's become negative it will instead be very large.) */
3699 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
3701 /* Convert from bytes to chunks of size value_bit. */
3702 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
3704 /* Re-pack the value. */
3706 if (VECTOR_MODE_P (outermode
))
3708 num_elem
= GET_MODE_NUNITS (outermode
);
3709 result_v
= rtvec_alloc (num_elem
);
3710 elems
= &RTVEC_ELT (result_v
, 0);
3711 outer_submode
= GET_MODE_INNER (outermode
);
3717 outer_submode
= outermode
;
3720 outer_class
= GET_MODE_CLASS (outer_submode
);
3721 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
3723 gcc_assert (elem_bitsize
% value_bit
== 0);
3724 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
3726 for (elem
= 0; elem
< num_elem
; elem
++)
3730 /* Vectors are stored in target memory order. (This is probably
3733 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3734 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3736 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3737 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3738 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3739 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3740 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3743 switch (outer_class
)
3746 case MODE_PARTIAL_INT
:
3748 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
3751 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3753 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
3754 for (; i
< elem_bitsize
; i
+= value_bit
)
3755 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
3756 << (i
- HOST_BITS_PER_WIDE_INT
));
3758 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3760 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
3761 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
3762 else if (elem_bitsize
<= 2 * HOST_BITS_PER_WIDE_INT
)
3763 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
3772 long tmp
[max_bitsize
/ 32];
3774 /* real_from_target wants its input in words affected by
3775 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3776 and use WORDS_BIG_ENDIAN instead; see the documentation
3777 of SUBREG in rtl.texi. */
3778 for (i
= 0; i
< max_bitsize
/ 32; i
++)
3780 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
3783 if (WORDS_BIG_ENDIAN
)
3784 ibase
= elem_bitsize
- 1 - i
;
3787 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
3790 real_from_target (&r
, tmp
, outer_submode
);
3791 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
3799 if (VECTOR_MODE_P (outermode
))
3800 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
3805 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3806 Return 0 if no simplifications are possible. */
3808 simplify_subreg (enum machine_mode outermode
, rtx op
,
3809 enum machine_mode innermode
, unsigned int byte
)
3811 /* Little bit of sanity checking. */
3812 gcc_assert (innermode
!= VOIDmode
);
3813 gcc_assert (outermode
!= VOIDmode
);
3814 gcc_assert (innermode
!= BLKmode
);
3815 gcc_assert (outermode
!= BLKmode
);
3817 gcc_assert (GET_MODE (op
) == innermode
3818 || GET_MODE (op
) == VOIDmode
);
3820 gcc_assert ((byte
% GET_MODE_SIZE (outermode
)) == 0);
3821 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
3823 if (outermode
== innermode
&& !byte
)
3826 if (GET_CODE (op
) == CONST_INT
3827 || GET_CODE (op
) == CONST_DOUBLE
3828 || GET_CODE (op
) == CONST_VECTOR
)
3829 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
3831 /* Changing mode twice with SUBREG => just change it once,
3832 or not at all if changing back op starting mode. */
3833 if (GET_CODE (op
) == SUBREG
)
3835 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
3836 int final_offset
= byte
+ SUBREG_BYTE (op
);
3839 if (outermode
== innermostmode
3840 && byte
== 0 && SUBREG_BYTE (op
) == 0)
3841 return SUBREG_REG (op
);
3843 /* The SUBREG_BYTE represents offset, as if the value were stored
3844 in memory. Irritating exception is paradoxical subreg, where
3845 we define SUBREG_BYTE to be 0. On big endian machines, this
3846 value should be negative. For a moment, undo this exception. */
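      /* In other words, a paradoxical subreg records SUBREG_BYTE == 0 even
	 though, on a big-endian target, the narrow value actually sits at
	 a negative offset within the wider mode; the adjustment below
	 temporarily reintroduces that offset so the two offsets can be
	 combined correctly.  */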
3847 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
3849 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
3850 if (WORDS_BIG_ENDIAN
)
3851 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3852 if (BYTES_BIG_ENDIAN
)
3853 final_offset
+= difference
% UNITS_PER_WORD
;
3855 if (SUBREG_BYTE (op
) == 0
3856 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
3858 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
3859 if (WORDS_BIG_ENDIAN
)
3860 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3861 if (BYTES_BIG_ENDIAN
)
3862 final_offset
+= difference
% UNITS_PER_WORD
;
3865 /* See whether resulting subreg will be paradoxical. */
3866 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
3868 /* In nonparadoxical subregs we can't handle negative offsets. */
3869 if (final_offset
< 0)
3871 /* Bail out in case resulting subreg would be incorrect. */
3872 if (final_offset
% GET_MODE_SIZE (outermode
)
3873 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
3879 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
3881 /* In paradoxical subreg, see if we are still looking on lower part.
3882 If so, our SUBREG_BYTE will be 0. */
3883 if (WORDS_BIG_ENDIAN
)
3884 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3885 if (BYTES_BIG_ENDIAN
)
3886 offset
+= difference
% UNITS_PER_WORD
;
3887 if (offset
== final_offset
)
3893 /* Recurse for further possible simplifications. */
3894 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
3898 if (validate_subreg (outermode
, innermostmode
,
3899 SUBREG_REG (op
), final_offset
))
3900 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
3904 /* SUBREG of a hard register => just change the register number
3905 and/or mode. If the hard register is not valid in that mode,
3906 suppress this simplification. If the hard register is the stack,
3907 frame, or argument pointer, leave this as a SUBREG. */
3910 && REGNO (op
) < FIRST_PSEUDO_REGISTER
3911 #ifdef CANNOT_CHANGE_MODE_CLASS
3912 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op
), innermode
, outermode
)
3913 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
3914 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
)
3916 && ((reload_completed
&& !frame_pointer_needed
)
3917 || (REGNO (op
) != FRAME_POINTER_REGNUM
3918 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3919 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
3922 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3923 && REGNO (op
) != ARG_POINTER_REGNUM
3925 && REGNO (op
) != STACK_POINTER_REGNUM
3926 && subreg_offset_representable_p (REGNO (op
), innermode
,
3929 unsigned int regno
= REGNO (op
);
3930 unsigned int final_regno
3931 = regno
+ subreg_regno_offset (regno
, innermode
, byte
, outermode
);
3933 /* ??? We do allow it if the current REG is not valid for
3934 its mode. This is a kludge to work around how float/complex
3935 arguments are passed on 32-bit SPARC and should be fixed. */
3936 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
3937 || ! HARD_REGNO_MODE_OK (regno
, innermode
))
3939 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, byte
);
	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis, which cannot
	     grok a partial register anyway.  */
3946 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
3947 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
3952 /* If we have a SUBREG of a register that we are replacing and we are
3953 replacing it with a MEM, make a new MEM and try replacing the
3954 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3955 or if we would be widening it. */
3958 && ! mode_dependent_address_p (XEXP (op
, 0))
3959 /* Allow splitting of volatile memory references in case we don't
3960 have instruction to move the whole thing. */
3961 && (! MEM_VOLATILE_P (op
)
3962 || ! have_insn_for (SET
, innermode
))
3963 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
3964 return adjust_address_nv (op
, outermode
, byte
);
3966 /* Handle complex values represented as CONCAT
3967 of real and imaginary part. */
3968 if (GET_CODE (op
) == CONCAT
)
3970 unsigned int inner_size
, final_offset
;
3973 inner_size
= GET_MODE_UNIT_SIZE (innermode
);
3974 part
= byte
< inner_size
? XEXP (op
, 0) : XEXP (op
, 1);
3975 final_offset
= byte
% inner_size
;
3976 if (final_offset
+ GET_MODE_SIZE (outermode
) > inner_size
)
3979 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
3982 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
3983 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
3987 /* Optimize SUBREG truncations of zero and sign extended values. */
3988 if ((GET_CODE (op
) == ZERO_EXTEND
3989 || GET_CODE (op
) == SIGN_EXTEND
)
3990 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
))
3992 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
3994 /* If we're requesting the lowpart of a zero or sign extension,
3995 there are three possibilities. If the outermode is the same
3996 as the origmode, we can omit both the extension and the subreg.
3997 If the outermode is not larger than the origmode, we can apply
3998 the truncation without the extension. Finally, if the outermode
3999 is larger than the origmode, but both are integer modes, we
4000 can just extend to the appropriate mode. */
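      /* For example (taking the lowpart offset), with
	 OP == (zero_extend:SI (reg:QI x)): (subreg:QI OP 0) folds to
	 (reg:QI x), and (subreg:HI OP 0) folds to
	 (zero_extend:HI (reg:QI x)).  */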
4003 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
4004 if (outermode
== origmode
)
4005 return XEXP (op
, 0);
4006 if (GET_MODE_BITSIZE (outermode
) <= GET_MODE_BITSIZE (origmode
))
4007 return simplify_gen_subreg (outermode
, XEXP (op
, 0), origmode
,
4008 subreg_lowpart_offset (outermode
,
4010 if (SCALAR_INT_MODE_P (outermode
))
4011 return simplify_gen_unary (GET_CODE (op
), outermode
,
4012 XEXP (op
, 0), origmode
);
4015 /* A SUBREG resulting from a zero extension may fold to zero if
4016 it extracts higher bits that the ZERO_EXTEND's source bits. */
4017 if (GET_CODE (op
) == ZERO_EXTEND
4018 && bitpos
>= GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0))))
4019 return CONST0_RTX (outermode
);
4022 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4023 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4024 the outer subreg is effectively a truncation to the original mode. */
4025 if ((GET_CODE (op
) == LSHIFTRT
4026 || GET_CODE (op
) == ASHIFTRT
)
4027 && SCALAR_INT_MODE_P (outermode
)
4028 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4029 to avoid the possibility that an outer LSHIFTRT shifts by more
4030 than the sign extension's sign_bit_copies and introduces zeros
4031 into the high bits of the result. */
4032 && (2 * GET_MODE_BITSIZE (outermode
)) <= GET_MODE_BITSIZE (innermode
)
4033 && GET_CODE (XEXP (op
, 1)) == CONST_INT
4034 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
4035 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
4036 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
4037 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
4038 return simplify_gen_binary (ASHIFTRT
, outermode
,
4039 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
4041 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4042 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4043 the outer subreg is effectively a truncation to the original mode. */
4044 if ((GET_CODE (op
) == LSHIFTRT
4045 || GET_CODE (op
) == ASHIFTRT
)
4046 && SCALAR_INT_MODE_P (outermode
)
4047 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
4048 && GET_CODE (XEXP (op
, 1)) == CONST_INT
4049 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
4050 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
4051 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
4052 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
4053 return simplify_gen_binary (LSHIFTRT
, outermode
,
4054 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
4056 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4057 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4058 the outer subreg is effectively a truncation to the original mode. */
4059 if (GET_CODE (op
) == ASHIFT
4060 && SCALAR_INT_MODE_P (outermode
)
4061 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
4062 && GET_CODE (XEXP (op
, 1)) == CONST_INT
4063 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
4064 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
4065 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
4066 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
4067 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
4068 return simplify_gen_binary (ASHIFT
, outermode
,
4069 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
4097 /* Simplify X, an rtx expression.
4099 Return the simplified expression or NULL if no simplifications
4102 This is the preferred entry point into the simplification routines;
4103 however, we still allow passes to call the more specific routines.
4105 Right now GCC has three (yes, three) major bodies of RTL simplification
4106 code that need to be unified.
4108 1. fold_rtx in cse.c. This code uses various CSE specific
4109 information to aid in RTL simplification.
4111 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4112 it uses combine specific information to aid in RTL
4115 3. The routines in this file.
4118 Long term we want to only have one body of simplification code; to
4119 get to that state I recommend the following steps:
4121 1. Pour over fold_rtx & simplify_rtx and move any simplifications
4122 which are not pass dependent state into these routines.
4124 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4125 use this routine whenever possible.
4127 3. Allow for pass dependent state to be provided to these
4128 routines and add simplifications based on the pass dependent
4129 state. Remove code from cse.c & combine.c that becomes
4132 It will take time, but ultimately the compiler will be easier to
4133 maintain and improve. It's totally silly that when we add a
4134 simplification that it needs to be added to 4 places (3 for RTL
4135 simplification and 1 for tree simplification. */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))