/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
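
/* For example, given the low word of a (low, high) pair whose sign bit is
   set, HWI_SIGN_EXTEND (low) supplies the matching high word of
   (HOST_WIDE_INT) -1; if the sign bit is clear it supplies 0.  */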
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
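
/* For example, in QImode the most negative value (const_int -128) negates
   to itself: gen_int_mode truncates the mathematically correct 128 back
   into the mode's range.  */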
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
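
/* For example, the canonical SImode constant (const_int -2147483648),
   i.e. 0x80000000, satisfies mode_signbit_p for SImode, since only the
   most significant of the mode's 32 bits is set.  */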
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
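
/* For an integer mode such as SImode, simplify_gen_binary (PLUS, SImode,
   x, const0_rtx) therefore just returns X, while an addition that cannot
   be folded or reassociated comes back as a fresh (plus ...) rtx.  */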
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
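
/* For example, a comparison of two CONST_INTs that can be decided at
   compile time folds to a constant here, while an undecidable comparison
   is returned as a fresh relational rtx in MODE.  */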
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
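
/* For example, substituting (const_int 4) for (reg R) in
   (plus:SI (reg R) (const_int 1)) does not merely rebuild the PLUS;
   the call to simplify_gen_binary folds the result to (const_int 5).  */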
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
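
/* Constant operands fold outright: for example (neg (const_int 9)) becomes
   (const_int -9) via simplify_const_unary_operation, while non-constant
   operands fall through to the pattern-based rules below.  */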
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == -1
          && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
611 /* Try to compute the value of a unary operation CODE whose output mode is to
612 be MODE with input operand OP whose mode was originally OP_MODE.
613 Return zero if the value cannot be computed. */
615 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
616 rtx op
, enum machine_mode op_mode
)
618 unsigned int width
= GET_MODE_BITSIZE (mode
);
620 if (code
== VEC_DUPLICATE
)
622 gcc_assert (VECTOR_MODE_P (mode
));
623 if (GET_MODE (op
) != VOIDmode
)
625 if (!VECTOR_MODE_P (GET_MODE (op
)))
626 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
628 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
631 if (GET_CODE (op
) == CONST_INT
|| GET_CODE (op
) == CONST_DOUBLE
632 || GET_CODE (op
) == CONST_VECTOR
)
634 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
635 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
636 rtvec v
= rtvec_alloc (n_elts
);
639 if (GET_CODE (op
) != CONST_VECTOR
)
640 for (i
= 0; i
< n_elts
; i
++)
641 RTVEC_ELT (v
, i
) = op
;
644 enum machine_mode inmode
= GET_MODE (op
);
645 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
646 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
648 gcc_assert (in_n_elts
< n_elts
);
649 gcc_assert ((n_elts
% in_n_elts
) == 0);
650 for (i
= 0; i
< n_elts
; i
++)
651 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
653 return gen_rtx_CONST_VECTOR (mode
, v
);
657 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
659 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
660 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
661 enum machine_mode opmode
= GET_MODE (op
);
662 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
663 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
664 rtvec v
= rtvec_alloc (n_elts
);
667 gcc_assert (op_n_elts
== n_elts
);
668 for (i
= 0; i
< n_elts
; i
++)
670 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
671 CONST_VECTOR_ELT (op
, i
),
672 GET_MODE_INNER (opmode
));
675 RTVEC_ELT (v
, i
) = x
;
677 return gen_rtx_CONST_VECTOR (mode
, v
);
680 /* The order of these tests is critical so that, for example, we don't
681 check the wrong mode (input vs. output) for a conversion operation,
682 such as FIX. At some point, this should be simplified. */
684 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
685 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
687 HOST_WIDE_INT hv
, lv
;
690 if (GET_CODE (op
) == CONST_INT
)
691 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
693 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
695 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
696 d
= real_value_truncate (mode
, d
);
697 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
699 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
700 && (GET_CODE (op
) == CONST_DOUBLE
701 || GET_CODE (op
) == CONST_INT
))
703 HOST_WIDE_INT hv
, lv
;
706 if (GET_CODE (op
) == CONST_INT
)
707 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
709 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
711 if (op_mode
== VOIDmode
)
713 /* We don't know how to interpret negative-looking numbers in
714 this case, so don't try to fold those. */
718 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
721 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
723 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
724 d
= real_value_truncate (mode
, d
);
725 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
728 if (GET_CODE (op
) == CONST_INT
729 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
731 HOST_WIDE_INT arg0
= INTVAL (op
);
745 val
= (arg0
>= 0 ? arg0
: - arg0
);
749 /* Don't use ffs here. Instead, get low order bit and then its
750 number. If arg0 is zero, this will return 0, as desired. */
751 arg0
&= GET_MODE_MASK (mode
);
752 val
= exact_log2 (arg0
& (- arg0
)) + 1;
756 arg0
&= GET_MODE_MASK (mode
);
757 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
760 val
= GET_MODE_BITSIZE (mode
) - floor_log2 (arg0
) - 1;
764 arg0
&= GET_MODE_MASK (mode
);
767 /* Even if the value at zero is undefined, we have to come
768 up with some replacement. Seems good enough. */
769 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
770 val
= GET_MODE_BITSIZE (mode
);
773 val
= exact_log2 (arg0
& -arg0
);
777 arg0
&= GET_MODE_MASK (mode
);
780 val
++, arg0
&= arg0
- 1;
784 arg0
&= GET_MODE_MASK (mode
);
787 val
++, arg0
&= arg0
- 1;
796 /* When zero-extending a CONST_INT, we need to know its
798 gcc_assert (op_mode
!= VOIDmode
);
799 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
801 /* If we were really extending the mode,
802 we would have to distinguish between zero-extension
803 and sign-extension. */
804 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
807 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
808 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
814 if (op_mode
== VOIDmode
)
816 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
818 /* If we were really extending the mode,
819 we would have to distinguish between zero-extension
820 and sign-extension. */
821 gcc_assert (width
== GET_MODE_BITSIZE (op_mode
));
824 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
827 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
829 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
830 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
847 return gen_int_mode (val
, mode
);
850 /* We can do some operations on integer CONST_DOUBLEs. Also allow
851 for a DImode operation on a CONST_INT. */
852 else if (GET_MODE (op
) == VOIDmode
853 && width
<= HOST_BITS_PER_WIDE_INT
* 2
854 && (GET_CODE (op
) == CONST_DOUBLE
855 || GET_CODE (op
) == CONST_INT
))
857 unsigned HOST_WIDE_INT l1
, lv
;
858 HOST_WIDE_INT h1
, hv
;
860 if (GET_CODE (op
) == CONST_DOUBLE
)
861 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
863 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
873 neg_double (l1
, h1
, &lv
, &hv
);
878 neg_double (l1
, h1
, &lv
, &hv
);
890 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
) + 1;
893 lv
= exact_log2 (l1
& -l1
) + 1;
899 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (h1
) - 1
900 - HOST_BITS_PER_WIDE_INT
;
902 lv
= GET_MODE_BITSIZE (mode
) - floor_log2 (l1
) - 1;
903 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
904 lv
= GET_MODE_BITSIZE (mode
);
910 lv
= exact_log2 (l1
& -l1
);
912 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& -h1
);
913 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, lv
))
914 lv
= GET_MODE_BITSIZE (mode
);
937 /* This is just a change-of-mode, so do nothing. */
942 gcc_assert (op_mode
!= VOIDmode
);
944 if (GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
948 lv
= l1
& GET_MODE_MASK (op_mode
);
952 if (op_mode
== VOIDmode
953 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
957 lv
= l1
& GET_MODE_MASK (op_mode
);
958 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
959 && (lv
& ((HOST_WIDE_INT
) 1
960 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
961 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
963 hv
= HWI_SIGN_EXTEND (lv
);
974 return immed_double_const (lv
, hv
, mode
);
977 else if (GET_CODE (op
) == CONST_DOUBLE
978 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
980 REAL_VALUE_TYPE d
, t
;
981 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
986 if (HONOR_SNANS (mode
) && real_isnan (&d
))
988 real_sqrt (&t
, mode
, &d
);
992 d
= REAL_VALUE_ABS (d
);
995 d
= REAL_VALUE_NEGATE (d
);
998 d
= real_value_truncate (mode
, d
);
1001 /* All this does is change the mode. */
1004 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1011 real_to_target (tmp
, &d
, GET_MODE (op
));
1012 for (i
= 0; i
< 4; i
++)
1014 real_from_target (&d
, tmp
, mode
);
1020 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1023 else if (GET_CODE (op
) == CONST_DOUBLE
1024 && GET_MODE_CLASS (GET_MODE (op
)) == MODE_FLOAT
1025 && GET_MODE_CLASS (mode
) == MODE_INT
1026 && width
<= 2*HOST_BITS_PER_WIDE_INT
&& width
> 0)
1028 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1029 operators are intentionally left unspecified (to ease implementation
1030 by target backends), for consistency, this routine implements the
1031 same semantics for constant folding as used by the middle-end. */
1033 /* This was formerly used only for non-IEEE float.
1034 eggert@twinsun.com says it is safe for IEEE also. */
1035 HOST_WIDE_INT xh
, xl
, th
, tl
;
1036 REAL_VALUE_TYPE x
, t
;
1037 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1041 if (REAL_VALUE_ISNAN (x
))
1044 /* Test against the signed upper bound. */
1045 if (width
> HOST_BITS_PER_WIDE_INT
)
1047 th
= ((unsigned HOST_WIDE_INT
) 1
1048 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1054 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1056 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1057 if (REAL_VALUES_LESS (t
, x
))
1064 /* Test against the signed lower bound. */
1065 if (width
> HOST_BITS_PER_WIDE_INT
)
1067 th
= (HOST_WIDE_INT
) -1 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1073 tl
= (HOST_WIDE_INT
) -1 << (width
- 1);
1075 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1076 if (REAL_VALUES_LESS (x
, t
))
1082 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1086 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1089 /* Test against the unsigned upper bound. */
1090 if (width
== 2*HOST_BITS_PER_WIDE_INT
)
1095 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1097 th
= ((unsigned HOST_WIDE_INT
) 1
1098 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1104 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1106 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1107 if (REAL_VALUES_LESS (t
, x
))
1114 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1120 return immed_double_const (xl
, xh
, mode
);
1126 /* Subroutine of simplify_binary_operation to simplify a commutative,
1127 associative binary operation CODE with result mode MODE, operating
1128 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1129 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1130 canonicalization is possible. */
1133 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
1138 /* Linearize the operator to the left. */
1139 if (GET_CODE (op1
) == code
)
1141 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1142 if (GET_CODE (op0
) == code
)
1144 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
1145 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
1148 /* "a op (b op c)" becomes "(b op c) op a". */
1149 if (! swap_commutative_operands_p (op1
, op0
))
1150 return simplify_gen_binary (code
, mode
, op1
, op0
);
1157 if (GET_CODE (op0
) == code
)
1159 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1160 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
1162 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
1163 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1166 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1167 tem
= swap_commutative_operands_p (XEXP (op0
, 1), op1
)
1168 ? simplify_binary_operation (code
, mode
, op1
, XEXP (op0
, 1))
1169 : simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
1171 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
1173 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1174 tem
= swap_commutative_operands_p (XEXP (op0
, 0), op1
)
1175 ? simplify_binary_operation (code
, mode
, op1
, XEXP (op0
, 0))
1176 : simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
1178 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
1185 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1186 and OP1. Return 0 if no simplification is possible.
1188 Don't use this for relational operations such as EQ or LT.
1189 Use simplify_relational_operation instead. */
1191 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
1194 rtx trueop0
, trueop1
;
1197 /* Relational operations don't work here. We must know the mode
1198 of the operands in order to do the comparison correctly.
1199 Assuming a full word can give incorrect results.
1200 Consider comparing 128 with -128 in QImode. */
1201 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
1202 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
1204 /* Make sure the constant is second. */
1205 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
1206 && swap_commutative_operands_p (op0
, op1
))
1208 tem
= op0
, op0
= op1
, op1
= tem
;
1211 trueop0
= avoid_constant_pool_reference (op0
);
1212 trueop1
= avoid_constant_pool_reference (op1
);
1214 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
1217 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
1221 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
1222 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
1226 unsigned int width
= GET_MODE_BITSIZE (mode
);
1228 /* Even if we can't compute a constant result,
1229 there are some cases worth simplifying. */
1234 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1235 when x is NaN, infinite, or finite and nonzero. They aren't
1236 when x is -0 and the rounding mode is not towards -infinity,
1237 since (-0) + 0 is then 0. */
1238 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
1241 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1242 transformations are safe even for IEEE. */
1243 if (GET_CODE (op0
) == NEG
)
1244 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
1245 else if (GET_CODE (op1
) == NEG
)
1246 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
1248 /* (~a) + 1 -> -a */
1249 if (INTEGRAL_MODE_P (mode
)
1250 && GET_CODE (op0
) == NOT
1251 && trueop1
== const1_rtx
)
1252 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
1254 /* Handle both-operands-constant cases. We can only add
1255 CONST_INTs to constants since the sum of relocatable symbols
1256 can't be handled by most assemblers. Don't add CONST_INT
1257 to CONST_INT since overflow won't be computed properly if wider
1258 than HOST_BITS_PER_WIDE_INT. */
1260 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
1261 && GET_CODE (op1
) == CONST_INT
)
1262 return plus_constant (op0
, INTVAL (op1
));
1263 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
1264 && GET_CODE (op0
) == CONST_INT
)
1265 return plus_constant (op1
, INTVAL (op0
));
1267 /* See if this is something like X * C - X or vice versa or
1268 if the multiplication is written as a shift. If so, we can
1269 distribute and make a new multiply, shift, or maybe just
1270 have X (if C is 2 in the example above). But don't make
1271 something more expensive than we had before. */
1273 if (SCALAR_INT_MODE_P (mode
))
1275 HOST_WIDE_INT coeff0h
= 0, coeff1h
= 0;
1276 unsigned HOST_WIDE_INT coeff0l
= 1, coeff1l
= 1;
1277 rtx lhs
= op0
, rhs
= op1
;
1279 if (GET_CODE (lhs
) == NEG
)
1283 lhs
= XEXP (lhs
, 0);
1285 else if (GET_CODE (lhs
) == MULT
1286 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1288 coeff0l
= INTVAL (XEXP (lhs
, 1));
1289 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1290 lhs
= XEXP (lhs
, 0);
1292 else if (GET_CODE (lhs
) == ASHIFT
1293 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1294 && INTVAL (XEXP (lhs
, 1)) >= 0
1295 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1297 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1299 lhs
= XEXP (lhs
, 0);
1302 if (GET_CODE (rhs
) == NEG
)
1306 rhs
= XEXP (rhs
, 0);
1308 else if (GET_CODE (rhs
) == MULT
1309 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1311 coeff1l
= INTVAL (XEXP (rhs
, 1));
1312 coeff1h
= INTVAL (XEXP (rhs
, 1)) < 0 ? -1 : 0;
1313 rhs
= XEXP (rhs
, 0);
1315 else if (GET_CODE (rhs
) == ASHIFT
1316 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1317 && INTVAL (XEXP (rhs
, 1)) >= 0
1318 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1320 coeff1l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1322 rhs
= XEXP (rhs
, 0);
1325 if (rtx_equal_p (lhs
, rhs
))
1327 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
1329 unsigned HOST_WIDE_INT l
;
1332 add_double (coeff0l
, coeff0h
, coeff1l
, coeff1h
, &l
, &h
);
1333 coeff
= immed_double_const (l
, h
, mode
);
1335 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1336 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1341 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1342 if ((GET_CODE (op1
) == CONST_INT
1343 || GET_CODE (op1
) == CONST_DOUBLE
)
1344 && GET_CODE (op0
) == XOR
1345 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1346 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1347 && mode_signbit_p (mode
, op1
))
1348 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1349 simplify_gen_binary (XOR
, mode
, op1
,
1352 /* If one of the operands is a PLUS or a MINUS, see if we can
1353 simplify this by the associative law.
1354 Don't use the associative law for floating point.
1355 The inaccuracy makes it nonassociative,
1356 and subtle programs can break if operations are associated. */
1358 if (INTEGRAL_MODE_P (mode
)
1359 && (plus_minus_operand_p (op0
)
1360 || plus_minus_operand_p (op1
))
1361 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1364 /* Reassociate floating point addition only when the user
1365 specifies unsafe math optimizations. */
1366 if (FLOAT_MODE_P (mode
)
1367 && flag_unsafe_math_optimizations
)
1369 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1377 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1378 using cc0, in which case we want to leave it as a COMPARE
1379 so we can distinguish it from a register-register-copy.
1381 In IEEE floating point, x-0 is not the same as x. */
1383 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1384 || ! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
)
1385 && trueop1
== CONST0_RTX (mode
))
1389 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1390 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
1391 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
1392 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
1394 rtx xop00
= XEXP (op0
, 0);
1395 rtx xop10
= XEXP (op1
, 0);
1398 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
1400 if (REG_P (xop00
) && REG_P (xop10
)
1401 && GET_MODE (xop00
) == GET_MODE (xop10
)
1402 && REGNO (xop00
) == REGNO (xop10
)
1403 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
1404 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
1411 /* We can't assume x-x is 0 even with non-IEEE floating point,
1412 but since it is zero except in very strange circumstances, we
1413 will treat it as zero with -funsafe-math-optimizations. */
1414 if (rtx_equal_p (trueop0
, trueop1
)
1415 && ! side_effects_p (op0
)
1416 && (! FLOAT_MODE_P (mode
) || flag_unsafe_math_optimizations
))
1417 return CONST0_RTX (mode
);
1419 /* Change subtraction from zero into negation. (0 - x) is the
1420 same as -x when x is NaN, infinite, or finite and nonzero.
1421 But if the mode has signed zeros, and does not round towards
1422 -infinity, then 0 - 0 is 0, not -0. */
1423 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
1424 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
1426 /* (-1 - a) is ~a. */
1427 if (trueop0
== constm1_rtx
)
1428 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
1430 /* Subtracting 0 has no effect unless the mode has signed zeros
1431 and supports rounding towards -infinity. In such a case,
1433 if (!(HONOR_SIGNED_ZEROS (mode
)
1434 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
1435 && trueop1
== CONST0_RTX (mode
))
1438 /* See if this is something like X * C - X or vice versa or
1439 if the multiplication is written as a shift. If so, we can
1440 distribute and make a new multiply, shift, or maybe just
1441 have X (if C is 2 in the example above). But don't make
1442 something more expensive than we had before. */
1444 if (SCALAR_INT_MODE_P (mode
))
1446 HOST_WIDE_INT coeff0h
= 0, negcoeff1h
= -1;
1447 unsigned HOST_WIDE_INT coeff0l
= 1, negcoeff1l
= -1;
1448 rtx lhs
= op0
, rhs
= op1
;
1450 if (GET_CODE (lhs
) == NEG
)
1454 lhs
= XEXP (lhs
, 0);
1456 else if (GET_CODE (lhs
) == MULT
1457 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1459 coeff0l
= INTVAL (XEXP (lhs
, 1));
1460 coeff0h
= INTVAL (XEXP (lhs
, 1)) < 0 ? -1 : 0;
1461 lhs
= XEXP (lhs
, 0);
1463 else if (GET_CODE (lhs
) == ASHIFT
1464 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1465 && INTVAL (XEXP (lhs
, 1)) >= 0
1466 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1468 coeff0l
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1470 lhs
= XEXP (lhs
, 0);
1473 if (GET_CODE (rhs
) == NEG
)
1477 rhs
= XEXP (rhs
, 0);
1479 else if (GET_CODE (rhs
) == MULT
1480 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1482 negcoeff1l
= -INTVAL (XEXP (rhs
, 1));
1483 negcoeff1h
= INTVAL (XEXP (rhs
, 1)) <= 0 ? 0 : -1;
1484 rhs
= XEXP (rhs
, 0);
1486 else if (GET_CODE (rhs
) == ASHIFT
1487 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1488 && INTVAL (XEXP (rhs
, 1)) >= 0
1489 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1491 negcoeff1l
= -(((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1)));
1493 rhs
= XEXP (rhs
, 0);
1496 if (rtx_equal_p (lhs
, rhs
))
1498 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
1500 unsigned HOST_WIDE_INT l
;
1503 add_double (coeff0l
, coeff0h
, negcoeff1l
, negcoeff1h
, &l
, &h
);
1504 coeff
= immed_double_const (l
, h
, mode
);
1506 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
1507 return rtx_cost (tem
, SET
) <= rtx_cost (orig
, SET
)
1512 /* (a - (-b)) -> (a + b). True even for IEEE. */
1513 if (GET_CODE (op1
) == NEG
)
1514 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1516 /* (-x - c) may be simplified as (-c - x). */
1517 if (GET_CODE (op0
) == NEG
1518 && (GET_CODE (op1
) == CONST_INT
1519 || GET_CODE (op1
) == CONST_DOUBLE
))
1521 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
1523 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
1526 /* If one of the operands is a PLUS or a MINUS, see if we can
1527 simplify this by the associative law.
1528 Don't use the associative law for floating point.
1529 The inaccuracy makes it nonassociative,
1530 and subtle programs can break if operations are associated. */
1532 if (INTEGRAL_MODE_P (mode
)
1533 && (plus_minus_operand_p (op0
)
1534 || plus_minus_operand_p (op1
))
1535 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
, 0)) != 0)
1538 /* Don't let a relocatable value get a negative coeff. */
1539 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1540 return simplify_gen_binary (PLUS
, mode
,
1542 neg_const_int (mode
, op1
));
1544 /* (x - (x & y)) -> (x & ~y) */
1545 if (GET_CODE (op1
) == AND
)
1547 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1549 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
1550 GET_MODE (XEXP (op1
, 1)));
1551 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1553 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1555 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
1556 GET_MODE (XEXP (op1
, 0)));
1557 return simplify_gen_binary (AND
, mode
, op0
, tem
);
1563 if (trueop1
== constm1_rtx
)
1564 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1566 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1567 x is NaN, since x * 0 is then also NaN. Nor is it valid
1568 when the mode has signed zeros, since multiplying a negative
1569 number by 0 will give -0, not 0. */
1570 if (!HONOR_NANS (mode
)
1571 && !HONOR_SIGNED_ZEROS (mode
)
1572 && trueop1
== CONST0_RTX (mode
)
1573 && ! side_effects_p (op0
))
1576 /* In IEEE floating point, x*1 is not equivalent to x for
1578 if (!HONOR_SNANS (mode
)
1579 && trueop1
== CONST1_RTX (mode
))
1582 /* Convert multiply by constant power of two into shift unless
1583 we are still generating RTL. This test is a kludge. */
1584 if (GET_CODE (trueop1
) == CONST_INT
1585 && (val
= exact_log2 (INTVAL (trueop1
))) >= 0
1586 /* If the mode is larger than the host word size, and the
1587 uppermost bit is set, then this isn't a power of two due
1588 to implicit sign extension. */
1589 && (width
<= HOST_BITS_PER_WIDE_INT
1590 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
1591 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
1593 /* Likewise for multipliers wider than a word. */
1594 else if (GET_CODE (trueop1
) == CONST_DOUBLE
1595 && (GET_MODE (trueop1
) == VOIDmode
1596 || GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_INT
)
1597 && GET_MODE (op0
) == mode
1598 && CONST_DOUBLE_LOW (trueop1
) == 0
1599 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0)
1600 return simplify_gen_binary (ASHIFT
, mode
, op0
,
1601 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
1603 /* x*2 is x+x and x*(-1) is -x */
1604 if (GET_CODE (trueop1
) == CONST_DOUBLE
1605 && GET_MODE_CLASS (GET_MODE (trueop1
)) == MODE_FLOAT
1606 && GET_MODE (op0
) == mode
)
1609 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1611 if (REAL_VALUES_EQUAL (d
, dconst2
))
1612 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
1614 if (REAL_VALUES_EQUAL (d
, dconstm1
))
1615 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1618 /* Reassociate multiplication, but for floating point MULTs
1619 only when the user specifies unsafe math optimizations. */
1620 if (! FLOAT_MODE_P (mode
)
1621 || flag_unsafe_math_optimizations
)
1623 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1630 if (trueop1
== const0_rtx
)
1632 if (GET_CODE (trueop1
) == CONST_INT
1633 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1634 == GET_MODE_MASK (mode
)))
1636 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1638 /* A | (~A) -> -1 */
1639 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1640 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1641 && ! side_effects_p (op0
)
1642 && SCALAR_INT_MODE_P (mode
))
1644 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1650 if (trueop1
== const0_rtx
)
1652 if (GET_CODE (trueop1
) == CONST_INT
1653 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1654 == GET_MODE_MASK (mode
)))
1655 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
1656 if (trueop0
== trueop1
1657 && ! side_effects_p (op0
)
1658 && GET_MODE_CLASS (mode
) != MODE_CC
)
1659 return CONST0_RTX (mode
);
1661 /* Canonicalize XOR of the most significant bit to PLUS. */
1662 if ((GET_CODE (op1
) == CONST_INT
1663 || GET_CODE (op1
) == CONST_DOUBLE
)
1664 && mode_signbit_p (mode
, op1
))
1665 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
1666 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1667 if ((GET_CODE (op1
) == CONST_INT
1668 || GET_CODE (op1
) == CONST_DOUBLE
)
1669 && GET_CODE (op0
) == PLUS
1670 && (GET_CODE (XEXP (op0
, 1)) == CONST_INT
1671 || GET_CODE (XEXP (op0
, 1)) == CONST_DOUBLE
)
1672 && mode_signbit_p (mode
, XEXP (op0
, 1)))
1673 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
1674 simplify_gen_binary (XOR
, mode
, op1
,
1677 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1683 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
1685 /* If we are turning off bits already known off in OP0, we need
1687 if (GET_CODE (trueop1
) == CONST_INT
1688 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1689 && (nonzero_bits (trueop0
, mode
) & ~INTVAL (trueop1
)) == 0)
1691 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1692 && GET_MODE_CLASS (mode
) != MODE_CC
)
1695 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1696 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1697 && ! side_effects_p (op0
)
1698 && GET_MODE_CLASS (mode
) != MODE_CC
)
1699 return CONST0_RTX (mode
);
1701 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1702 there are no nonzero bits of C outside of X's mode. */
1703 if ((GET_CODE (op0
) == SIGN_EXTEND
1704 || GET_CODE (op0
) == ZERO_EXTEND
)
1705 && GET_CODE (trueop1
) == CONST_INT
1706 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1707 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
1708 & INTVAL (trueop1
)) == 0)
1710 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
1711 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
1712 gen_int_mode (INTVAL (trueop1
),
1714 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
1717 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1718 ((A & N) + B) & M -> (A + B) & M
1719 Similarly if (N & M) == 0,
1720 ((A | N) + B) & M -> (A + B) & M
1721 and for - instead of + and/or ^ instead of |. */
1722 if (GET_CODE (trueop1
) == CONST_INT
1723 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
1724 && ~INTVAL (trueop1
)
1725 && (INTVAL (trueop1
) & (INTVAL (trueop1
) + 1)) == 0
1726 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
1731 pmop
[0] = XEXP (op0
, 0);
1732 pmop
[1] = XEXP (op0
, 1);
1734 for (which
= 0; which
< 2; which
++)
1737 switch (GET_CODE (tem
))
1740 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
1741 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
))
1742 == INTVAL (trueop1
))
1743 pmop
[which
] = XEXP (tem
, 0);
1747 if (GET_CODE (XEXP (tem
, 1)) == CONST_INT
1748 && (INTVAL (XEXP (tem
, 1)) & INTVAL (trueop1
)) == 0)
1749 pmop
[which
] = XEXP (tem
, 0);
1756 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
1758 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
1760 return simplify_gen_binary (code
, mode
, tem
, op1
);
1763 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1769 /* 0/x is 0 (or x&0 if x has side-effects). */
1770 if (trueop0
== CONST0_RTX (mode
))
1772 if (side_effects_p (op1
))
1773 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
1777 if (trueop1
== CONST1_RTX (mode
))
1778 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
1779 /* Convert divide by power of two into shift. */
1780 if (GET_CODE (trueop1
) == CONST_INT
1781 && (val
= exact_log2 (INTVAL (trueop1
))) > 0)
1782 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
1786 /* Handle floating point and integers separately. */
1787 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1789 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1790 safe for modes with NaNs, since 0.0 / 0.0 will then be
1791 NaN rather than 0.0. Nor is it safe for modes with signed
1792 zeros, since dividing 0 by a negative number gives -0.0 */
1793 if (trueop0
== CONST0_RTX (mode
)
1794 && !HONOR_NANS (mode
)
1795 && !HONOR_SIGNED_ZEROS (mode
)
1796 && ! side_effects_p (op1
))
1799 if (trueop1
== CONST1_RTX (mode
)
1800 && !HONOR_SNANS (mode
))
1803 if (GET_CODE (trueop1
) == CONST_DOUBLE
1804 && trueop1
!= CONST0_RTX (mode
))
1807 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
1810 if (REAL_VALUES_EQUAL (d
, dconstm1
)
1811 && !HONOR_SNANS (mode
))
1812 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
1814 /* Change FP division by a constant into multiplication.
1815 Only do this with -funsafe-math-optimizations. */
1816 if (flag_unsafe_math_optimizations
1817 && !REAL_VALUES_EQUAL (d
, dconst0
))
1819 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
1820 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1821 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
1827 /* 0/x is 0 (or x&0 if x has side-effects). */
1828 if (trueop0
== CONST0_RTX (mode
))
1830 if (side_effects_p (op1
))
1831 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
1835 if (trueop1
== CONST1_RTX (mode
))
1836 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
1838 if (trueop1
== constm1_rtx
)
1840 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
1841 return simplify_gen_unary (NEG
, mode
, x
, mode
);
1847 /* 0%x is 0 (or x&0 if x has side-effects). */
1848 if (trueop0
== CONST0_RTX (mode
))
1850 if (side_effects_p (op1
))
1851 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
1854 /* x%1 is 0 (of x&0 if x has side-effects). */
1855 if (trueop1
== CONST1_RTX (mode
))
1857 if (side_effects_p (op0
))
1858 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
1859 return CONST0_RTX (mode
);
1861 /* Implement modulus by power of two as AND. */
1862 if (GET_CODE (trueop1
) == CONST_INT
1863 && exact_log2 (INTVAL (trueop1
)) > 0)
1864 return simplify_gen_binary (AND
, mode
, op0
,
1865 GEN_INT (INTVAL (op1
) - 1));
1869 /* 0%x is 0 (or x&0 if x has side-effects). */
1870 if (trueop0
== CONST0_RTX (mode
))
1872 if (side_effects_p (op1
))
1873 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
1876 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
1877 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
1879 if (side_effects_p (op0
))
1880 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
1881 return CONST0_RTX (mode
);
1888 /* Rotating ~0 always results in ~0. */
1889 if (GET_CODE (trueop0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1890 && (unsigned HOST_WIDE_INT
) INTVAL (trueop0
) == GET_MODE_MASK (mode
)
1891 && ! side_effects_p (op1
))
1894 /* Fall through.... */
1898 if (trueop1
== CONST0_RTX (mode
))
1900 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
1905 if (width
<= HOST_BITS_PER_WIDE_INT
1906 && GET_CODE (trueop1
) == CONST_INT
1907 && INTVAL (trueop1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1908 && ! side_effects_p (op0
))
1910 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1912 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1918 if (width
<= HOST_BITS_PER_WIDE_INT
1919 && GET_CODE (trueop1
) == CONST_INT
1920 && ((unsigned HOST_WIDE_INT
) INTVAL (trueop1
)
1921 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1922 && ! side_effects_p (op0
))
1924 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1926 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1932 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
1934 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1936 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1942 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
1944 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1946 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1955 /* ??? There are simplifications that can be done. */
1959 if (!VECTOR_MODE_P (mode
))
1961 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
1962 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
1963 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
1964 gcc_assert (XVECLEN (trueop1
, 0) == 1);
1965 gcc_assert (GET_CODE (XVECEXP (trueop1
, 0, 0)) == CONST_INT
);
1967 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1968 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
1973 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
1974 gcc_assert (GET_MODE_INNER (mode
)
1975 == GET_MODE_INNER (GET_MODE (trueop0
)));
1976 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
1978 if (GET_CODE (trueop0
) == CONST_VECTOR
)
1980 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1981 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1982 rtvec v
= rtvec_alloc (n_elts
);
1985 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
1986 for (i
= 0; i
< n_elts
; i
++)
1988 rtx x
= XVECEXP (trueop1
, 0, i
);
1990 gcc_assert (GET_CODE (x
) == CONST_INT
);
1991 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
1995 return gen_rtx_CONST_VECTOR (mode
, v
);
2001 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
2002 ? GET_MODE (trueop0
)
2003 : GET_MODE_INNER (mode
));
2004 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
2005 ? GET_MODE (trueop1
)
2006 : GET_MODE_INNER (mode
));
2008 gcc_assert (VECTOR_MODE_P (mode
));
2009 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
2010 == GET_MODE_SIZE (mode
));
2012 if (VECTOR_MODE_P (op0_mode
))
2013 gcc_assert (GET_MODE_INNER (mode
)
2014 == GET_MODE_INNER (op0_mode
));
2016 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
2018 if (VECTOR_MODE_P (op1_mode
))
2019 gcc_assert (GET_MODE_INNER (mode
)
2020 == GET_MODE_INNER (op1_mode
));
2022 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
2024 if ((GET_CODE (trueop0
) == CONST_VECTOR
2025 || GET_CODE (trueop0
) == CONST_INT
2026 || GET_CODE (trueop0
) == CONST_DOUBLE
)
2027 && (GET_CODE (trueop1
) == CONST_VECTOR
2028 || GET_CODE (trueop1
) == CONST_INT
2029 || GET_CODE (trueop1
) == CONST_DOUBLE
))
2031 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
2032 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
2033 rtvec v
= rtvec_alloc (n_elts
);
2035 unsigned in_n_elts
= 1;
2037 if (VECTOR_MODE_P (op0_mode
))
2038 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
2039 for (i
= 0; i
< n_elts
; i
++)
2043 if (!VECTOR_MODE_P (op0_mode
))
2044 RTVEC_ELT (v
, i
) = trueop0
;
2046 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
2050 if (!VECTOR_MODE_P (op1_mode
))
2051 RTVEC_ELT (v
, i
) = trueop1
;
2053 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
2058 return gen_rtx_CONST_VECTOR (mode
, v
);
2071 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2074 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
2076 unsigned int width
= GET_MODE_BITSIZE (mode
);
2078 if (VECTOR_MODE_P (mode
)
2079 && code
!= VEC_CONCAT
2080 && GET_CODE (op0
) == CONST_VECTOR
2081 && GET_CODE (op1
) == CONST_VECTOR
)
2083 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2084 enum machine_mode op0mode
= GET_MODE (op0
);
2085 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
2086 enum machine_mode op1mode
= GET_MODE (op1
);
2087 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
2088 rtvec v
= rtvec_alloc (n_elts
);
2091 gcc_assert (op0_n_elts
== n_elts
);
2092 gcc_assert (op1_n_elts
== n_elts
);
2093 for (i
= 0; i
< n_elts
; i
++)
2095 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
2096 CONST_VECTOR_ELT (op0
, i
),
2097 CONST_VECTOR_ELT (op1
, i
));
2100 RTVEC_ELT (v
, i
) = x
;
2103 return gen_rtx_CONST_VECTOR (mode
, v
);
2106 if (VECTOR_MODE_P (mode
)
2107 && code
== VEC_CONCAT
2108 && CONSTANT_P (op0
) && CONSTANT_P (op1
))
2110 unsigned n_elts
= GET_MODE_NUNITS (mode
);
2111 rtvec v
= rtvec_alloc (n_elts
);
2113 gcc_assert (n_elts
>= 2);
2116 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
2117 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
2119 RTVEC_ELT (v
, 0) = op0
;
2120 RTVEC_ELT (v
, 1) = op1
;
2124 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
2125 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
2128 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
2129 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
2130 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
2132 for (i
= 0; i
< op0_n_elts
; ++i
)
2133 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
2134 for (i
= 0; i
< op1_n_elts
; ++i
)
2135 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
2138 return gen_rtx_CONST_VECTOR (mode
, v
);
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND || code == IOR || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      if (code == AND)
		tmp0[i] &= tmp1[i];
	      else if (code == IOR)
		tmp0[i] |= tmp1[i];
	      else
		tmp0[i] ^= tmp1[i];
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
		   && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }
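
/* Worked example for the rounding-mode check above (illustrative only):
   folding something like (div:SF 1.0 3.0) is inexact, so under
   -frounding-math the compile-time result could disagree with the run-time
   one and the fold is skipped; without that flag the rounded constant is
   emitted.  */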
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */
	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
	  arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

	  arg0s = arg0;
	  if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg0s |= ((HOST_WIDE_INT) (-1) << width);

	  arg1s = arg1;
	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}

      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;

	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
	  break;

	case ROTATERT:
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	default:
	  /* ??? There are simplifications that can be done.  */
	  return 0;
	}

      return gen_int_mode (val, mode);
    }

  return 0;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
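
/* Rough sketch of what simplify_plus_minus does with its operand array
   (hypothetical input, for illustration only):

       (plus (minus a b) (minus c a))
	 -> ops = { +a, -b, +c, -a }  ->  { +c, -b }  ->  (minus c b)

   Each entry pairs an rtx with a negation flag; entries are simplified
   against each other and the survivors are rebuilt into a PLUS/MINUS
   chain.  */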
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed, i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      ops[n_ops].op = constm1_rtx;
	      ops[n_ops++].neg = this_neg;
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = !this_neg;
	      changed = 1;
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = 0;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem, ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first, if possible.  */
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));

  if (mode == VOIDmode
      || GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
	{
	  /* If op0 is a comparison, extract the comparison arguments
	     from it.  */
	  if (code == NE)
	    {
	      if (GET_MODE (op0) == mode)
		return simplify_rtx (op0);
	      else
		return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	  else if (code == EQ)
	    {
	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	      if (new_code != UNKNOWN)
		return simplify_gen_relational (new_code, mode, VOIDmode,
						XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
			       cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  return NULL_RTX;
}
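
/* Example of the (eq/ne (plus x cst1) cst2) rule above (illustrative
   register number only):

       (eq:SI (plus:SI (reg:SI 100) (const_int 3)) (const_int 5))
	 -> (eq:SI (reg:SI 100) (const_int 2))

   i.e. the constant is folded to cst2 - cst1 and the comparison is redone
   against the bare operand.  */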
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases cannot be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */

  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;
3058 if (flag_unsafe_math_optimizations
&& code
== UNORDERED
)
3061 /* For modes without NaNs, if the two operands are equal, we know the
3062 result except if they have side-effects. */
3063 if (! HONOR_NANS (GET_MODE (trueop0
))
3064 && rtx_equal_p (trueop0
, trueop1
)
3065 && ! side_effects_p (trueop0
))
3066 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
3068 /* If the operands are floating-point constants, see if we can fold
3070 else if (GET_CODE (trueop0
) == CONST_DOUBLE
3071 && GET_CODE (trueop1
) == CONST_DOUBLE
3072 && GET_MODE_CLASS (GET_MODE (trueop0
)) == MODE_FLOAT
)
3074 REAL_VALUE_TYPE d0
, d1
;
3076 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
3077 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
3079 /* Comparisons are unordered iff at least one of the values is NaN. */
3080 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
3090 return const_true_rtx
;
3103 equal
= REAL_VALUES_EQUAL (d0
, d1
);
3104 op0lt
= op0ltu
= REAL_VALUES_LESS (d0
, d1
);
3105 op1lt
= op1ltu
= REAL_VALUES_LESS (d1
, d0
);
3108 /* Otherwise, see if the operands are both integers. */
3109 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
3110 && (GET_CODE (trueop0
) == CONST_DOUBLE
3111 || GET_CODE (trueop0
) == CONST_INT
)
3112 && (GET_CODE (trueop1
) == CONST_DOUBLE
3113 || GET_CODE (trueop1
) == CONST_INT
))
3115 int width
= GET_MODE_BITSIZE (mode
);
3116 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
3117 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
3119 /* Get the two words comprising each integer constant. */
3120 if (GET_CODE (trueop0
) == CONST_DOUBLE
)
3122 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
3123 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
3127 l0u
= l0s
= INTVAL (trueop0
);
3128 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
3131 if (GET_CODE (trueop1
) == CONST_DOUBLE
)
3133 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
3134 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
3138 l1u
= l1s
= INTVAL (trueop1
);
3139 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
3142 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3143 we have to sign or zero-extend the values. */
3144 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
3146 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3147 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3149 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3150 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
3152 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
3153 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
3155 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
3156 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
3158 equal
= (h0u
== h1u
&& l0u
== l1u
);
3159 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
3160 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
3161 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
3162 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
3165 /* Otherwise, there are some code-specific tests we can make. */
3168 /* Optimize comparisons with upper and lower bounds. */
3169 if (SCALAR_INT_MODE_P (mode
)
3170 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
3183 get_mode_bounds (mode
, sign
, mode
, &mmin
, &mmax
);
3190 /* x >= min is always true. */
3191 if (rtx_equal_p (trueop1
, mmin
))
3192 tem
= const_true_rtx
;
3198 /* x <= max is always true. */
3199 if (rtx_equal_p (trueop1
, mmax
))
3200 tem
= const_true_rtx
;
3205 /* x > max is always false. */
3206 if (rtx_equal_p (trueop1
, mmax
))
3212 /* x < min is always false. */
3213 if (rtx_equal_p (trueop1
, mmin
))
3220 if (tem
== const0_rtx
3221 || tem
== const_true_rtx
)
3228 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3233 if (trueop1
== const0_rtx
&& nonzero_address_p (op0
))
3234 return const_true_rtx
;
3238 /* Optimize abs(x) < 0.0. */
3239 if (trueop1
== CONST0_RTX (mode
) && !HONOR_SNANS (mode
))
3241 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3243 if (GET_CODE (tem
) == ABS
)
3249 /* Optimize abs(x) >= 0.0. */
3250 if (trueop1
== CONST0_RTX (mode
) && !HONOR_NANS (mode
))
3252 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3254 if (GET_CODE (tem
) == ABS
)
3255 return const_true_rtx
;
3260 /* Optimize ! (abs(x) < 0.0). */
3261 if (trueop1
== CONST0_RTX (mode
))
3263 tem
= GET_CODE (trueop0
) == FLOAT_EXTEND
? XEXP (trueop0
, 0)
3265 if (GET_CODE (tem
) == ABS
)
3266 return const_true_rtx
;
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;

    default:
      gcc_unreachable ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
3335 if (GET_CODE (op0
) == CONST_INT
3336 && GET_CODE (op1
) == CONST_INT
3337 && GET_CODE (op2
) == CONST_INT
3338 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
3339 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
3341 /* Extracting a bit-field from a constant */
3342 HOST_WIDE_INT val
= INTVAL (op0
);
3344 if (BITS_BIG_ENDIAN
)
3345 val
>>= (GET_MODE_BITSIZE (op0_mode
)
3346 - INTVAL (op2
) - INTVAL (op1
));
3348 val
>>= INTVAL (op2
);
3350 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
3352 /* First zero-extend. */
3353 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
3354 /* If desired, propagate sign bit. */
3355 if (code
== SIGN_EXTRACT
3356 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
3357 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
3360 /* Clear the bits that don't belong in our mode,
3361 unless they and our sign bit are all one.
3362 So we get either a reasonable negative value or a reasonable
3363 unsigned value for this mode. */
3364 if (width
< HOST_BITS_PER_WIDE_INT
3365 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
3366 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
3367 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
3369 return gen_int_mode (val
, mode
);
3374 if (GET_CODE (op0
) == CONST_INT
)
3375 return op0
!= const0_rtx
? op1
: op2
;
3377 /* Convert c ? a : a into "a". */
3378 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
3381 /* Convert a != b ? a : b into "a". */
3382 if (GET_CODE (op0
) == NE
3383 && ! side_effects_p (op0
)
3384 && ! HONOR_NANS (mode
)
3385 && ! HONOR_SIGNED_ZEROS (mode
)
3386 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3387 && rtx_equal_p (XEXP (op0
, 1), op2
))
3388 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3389 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3392 /* Convert a == b ? a : b into "b". */
3393 if (GET_CODE (op0
) == EQ
3394 && ! side_effects_p (op0
)
3395 && ! HONOR_NANS (mode
)
3396 && ! HONOR_SIGNED_ZEROS (mode
)
3397 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
3398 && rtx_equal_p (XEXP (op0
, 1), op2
))
3399 || (rtx_equal_p (XEXP (op0
, 0), op2
)
3400 && rtx_equal_p (XEXP (op0
, 1), op1
))))
3403 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
3405 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
3406 ? GET_MODE (XEXP (op0
, 1))
3407 : GET_MODE (XEXP (op0
, 0)));
3410 /* Look for happy constants in op1 and op2. */
3411 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
3413 HOST_WIDE_INT t
= INTVAL (op1
);
3414 HOST_WIDE_INT f
= INTVAL (op2
);
3416 if (t
== STORE_FLAG_VALUE
&& f
== 0)
3417 code
= GET_CODE (op0
);
3418 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
3421 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
3429 return simplify_gen_relational (code
, mode
, cmp_mode
,
3430 XEXP (op0
, 0), XEXP (op0
, 1));
3433 if (cmp_mode
== VOIDmode
)
3434 cmp_mode
= op0_mode
;
3435 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
3436 cmp_mode
, XEXP (op0
, 0),
3439 /* See if any simplifications were possible. */
3442 if (GET_CODE (temp
) == CONST_INT
)
3443 return temp
== const0_rtx
? op2
: op1
;
3445 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
3451 gcc_assert (GET_MODE (op0
) == mode
);
3452 gcc_assert (GET_MODE (op1
) == mode
);
3453 gcc_assert (VECTOR_MODE_P (mode
));
3454 op2
= avoid_constant_pool_reference (op2
);
3455 if (GET_CODE (op2
) == CONST_INT
)
3457 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3458 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3459 int mask
= (1 << n_elts
) - 1;
3461 if (!(INTVAL (op2
) & mask
))
3463 if ((INTVAL (op2
) & mask
) == mask
)
3466 op0
= avoid_constant_pool_reference (op0
);
3467 op1
= avoid_constant_pool_reference (op1
);
3468 if (GET_CODE (op0
) == CONST_VECTOR
3469 && GET_CODE (op1
) == CONST_VECTOR
)
3471 rtvec v
= rtvec_alloc (n_elts
);
3474 for (i
= 0; i
< n_elts
; i
++)
3475 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
3476 ? CONST_VECTOR_ELT (op0
, i
)
3477 : CONST_VECTOR_ELT (op1
, i
));
3478 return gen_rtx_CONST_VECTOR (mode
, v
);
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
3516 rtvec result_v
= NULL
;
3517 enum mode_class outer_class
;
3518 enum machine_mode outer_submode
;
3520 /* Some ports misuse CCmode. */
3521 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& GET_CODE (op
) == CONST_INT
)
3524 /* We have no way to represent a complex constant at the rtl level. */
3525 if (COMPLEX_MODE_P (outermode
))
3528 /* Unpack the value. */
3530 if (GET_CODE (op
) == CONST_VECTOR
)
3532 num_elem
= CONST_VECTOR_NUNITS (op
);
3533 elems
= &CONST_VECTOR_ELT (op
, 0);
3534 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
3540 elem_bitsize
= max_bitsize
;
3542 /* If this asserts, it is too complicated; reducing value_bit may help. */
3543 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
3544 /* I don't know how to handle endianness of sub-units. */
3545 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
3547 for (elem
= 0; elem
< num_elem
; elem
++)
3550 rtx el
= elems
[elem
];
3552 /* Vectors are kept in target memory order. (This is probably
3555 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3556 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3558 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3559 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3560 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3561 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3562 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3565 switch (GET_CODE (el
))
3569 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3571 *vp
++ = INTVAL (el
) >> i
;
3572 /* CONST_INTs are always logically sign-extended. */
3573 for (; i
< elem_bitsize
; i
+= value_bit
)
3574 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
3578 if (GET_MODE (el
) == VOIDmode
)
3580 /* If this triggers, someone should have generated a
3581 CONST_INT instead. */
3582 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
3584 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
3585 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
3586 while (i
< HOST_BITS_PER_WIDE_INT
* 2 && i
< elem_bitsize
)
3589 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
3592 /* It shouldn't matter what's done here, so fill it with
3594 for (; i
< max_bitsize
; i
+= value_bit
)
3599 long tmp
[max_bitsize
/ 32];
3600 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
3602 gcc_assert (GET_MODE_CLASS (GET_MODE (el
)) == MODE_FLOAT
);
3603 gcc_assert (bitsize
<= elem_bitsize
);
3604 gcc_assert (bitsize
% value_bit
== 0);
3606 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
3609 /* real_to_target produces its result in words affected by
3610 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3611 and use WORDS_BIG_ENDIAN instead; see the documentation
3612 of SUBREG in rtl.texi. */
3613 for (i
= 0; i
< bitsize
; i
+= value_bit
)
3616 if (WORDS_BIG_ENDIAN
)
3617 ibase
= bitsize
- 1 - i
;
3620 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
3623 /* It shouldn't matter what's done here, so fill it with
3625 for (; i
< elem_bitsize
; i
+= value_bit
)
3635 /* Now, pick the right byte to start with. */
3636 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3637 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3638 will already have offset 0. */
3639 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
3641 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
3643 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3644 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3645 byte
= (subword_byte
% UNITS_PER_WORD
3646 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3649 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3650 so if it's become negative it will instead be very large.) */
3651 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
3653 /* Convert from bytes to chunks of size value_bit. */
3654 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
3656 /* Re-pack the value. */
3658 if (VECTOR_MODE_P (outermode
))
3660 num_elem
= GET_MODE_NUNITS (outermode
);
3661 result_v
= rtvec_alloc (num_elem
);
3662 elems
= &RTVEC_ELT (result_v
, 0);
3663 outer_submode
= GET_MODE_INNER (outermode
);
3669 outer_submode
= outermode
;
3672 outer_class
= GET_MODE_CLASS (outer_submode
);
3673 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
3675 gcc_assert (elem_bitsize
% value_bit
== 0);
3676 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
3678 for (elem
= 0; elem
< num_elem
; elem
++)
3682 /* Vectors are stored in target memory order. (This is probably
3685 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
3686 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
3688 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
3689 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
3690 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
3691 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
3692 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
3695 switch (outer_class
)
3698 case MODE_PARTIAL_INT
:
3700 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
3703 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
3705 lo
|= (HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
3706 for (; i
< elem_bitsize
; i
+= value_bit
)
3707 hi
|= ((HOST_WIDE_INT
)(*vp
++ & value_mask
)
3708 << (i
- HOST_BITS_PER_WIDE_INT
));
3710 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3712 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
3713 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
3715 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
3722 long tmp
[max_bitsize
/ 32];
3724 /* real_from_target wants its input in words affected by
3725 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3726 and use WORDS_BIG_ENDIAN instead; see the documentation
3727 of SUBREG in rtl.texi. */
3728 for (i
= 0; i
< max_bitsize
/ 32; i
++)
3730 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
3733 if (WORDS_BIG_ENDIAN
)
3734 ibase
= elem_bitsize
- 1 - i
;
3737 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
3740 real_from_target (&r
, tmp
, outer_submode
);
3741 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
3749 if (VECTOR_MODE_P (outermode
))
3750 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
3781 /* Changing mode twice with SUBREG => just change it once,
3782 or not at all if changing back op starting mode. */
3783 if (GET_CODE (op
) == SUBREG
)
3785 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
3786 int final_offset
= byte
+ SUBREG_BYTE (op
);
3789 if (outermode
== innermostmode
3790 && byte
== 0 && SUBREG_BYTE (op
) == 0)
3791 return SUBREG_REG (op
);
3793 /* The SUBREG_BYTE represents offset, as if the value were stored
3794 in memory. Irritating exception is paradoxical subreg, where
3795 we define SUBREG_BYTE to be 0. On big endian machines, this
3796 value should be negative. For a moment, undo this exception. */
3797 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
3799 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
3800 if (WORDS_BIG_ENDIAN
)
3801 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3802 if (BYTES_BIG_ENDIAN
)
3803 final_offset
+= difference
% UNITS_PER_WORD
;
3805 if (SUBREG_BYTE (op
) == 0
3806 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
3808 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
3809 if (WORDS_BIG_ENDIAN
)
3810 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3811 if (BYTES_BIG_ENDIAN
)
3812 final_offset
+= difference
% UNITS_PER_WORD
;
3815 /* See whether resulting subreg will be paradoxical. */
3816 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
3818 /* In nonparadoxical subregs we can't handle negative offsets. */
3819 if (final_offset
< 0)
3821 /* Bail out in case resulting subreg would be incorrect. */
3822 if (final_offset
% GET_MODE_SIZE (outermode
)
3823 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
3829 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
3831 /* In paradoxical subreg, see if we are still looking on lower part.
3832 If so, our SUBREG_BYTE will be 0. */
3833 if (WORDS_BIG_ENDIAN
)
3834 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
3835 if (BYTES_BIG_ENDIAN
)
3836 offset
+= difference
% UNITS_PER_WORD
;
3837 if (offset
== final_offset
)
3843 /* Recurse for further possible simplifications. */
3844 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
3848 if (validate_subreg (outermode
, innermostmode
,
3849 SUBREG_REG (op
), final_offset
))
3850 return gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
3854 /* SUBREG of a hard register => just change the register number
3855 and/or mode. If the hard register is not valid in that mode,
3856 suppress this simplification. If the hard register is the stack,
3857 frame, or argument pointer, leave this as a SUBREG. */
3860 && REGNO (op
) < FIRST_PSEUDO_REGISTER
3861 #ifdef CANNOT_CHANGE_MODE_CLASS
3862 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op
), innermode
, outermode
)
3863 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_INT
3864 && GET_MODE_CLASS (innermode
) != MODE_COMPLEX_FLOAT
)
3866 && ((reload_completed
&& !frame_pointer_needed
)
3867 || (REGNO (op
) != FRAME_POINTER_REGNUM
3868 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3869 && REGNO (op
) != HARD_FRAME_POINTER_REGNUM
3872 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3873 && REGNO (op
) != ARG_POINTER_REGNUM
3875 && REGNO (op
) != STACK_POINTER_REGNUM
3876 && subreg_offset_representable_p (REGNO (op
), innermode
,
3879 unsigned int regno
= REGNO (op
);
3880 unsigned int final_regno
3881 = regno
+ subreg_regno_offset (regno
, innermode
, byte
, outermode
);
3883 /* ??? We do allow it if the current REG is not valid for
3884 its mode. This is a kludge to work around how float/complex
3885 arguments are passed on 32-bit SPARC and should be fixed. */
3886 if (HARD_REGNO_MODE_OK (final_regno
, outermode
)
3887 || ! HARD_REGNO_MODE_OK (regno
, innermode
))
3889 rtx x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, byte
);
	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */
3896 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
3897 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
3902 /* If we have a SUBREG of a register that we are replacing and we are
3903 replacing it with a MEM, make a new MEM and try replacing the
3904 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3905 or if we would be widening it. */
3908 && ! mode_dependent_address_p (XEXP (op
, 0))
3909 /* Allow splitting of volatile memory references in case we don't
3910 have instruction to move the whole thing. */
3911 && (! MEM_VOLATILE_P (op
)
3912 || ! have_insn_for (SET
, innermode
))
3913 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
3914 return adjust_address_nv (op
, outermode
, byte
);
3916 /* Handle complex values represented as CONCAT
3917 of real and imaginary part. */
3918 if (GET_CODE (op
) == CONCAT
)
3920 unsigned int inner_size
, final_offset
;
3923 inner_size
= GET_MODE_UNIT_SIZE (innermode
);
3924 part
= byte
< inner_size
? XEXP (op
, 0) : XEXP (op
, 1);
3925 final_offset
= byte
% inner_size
;
3926 if (final_offset
+ GET_MODE_SIZE (outermode
) > inner_size
)
3929 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
3932 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
3933 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
3937 /* Optimize SUBREG truncations of zero and sign extended values. */
3938 if ((GET_CODE (op
) == ZERO_EXTEND
3939 || GET_CODE (op
) == SIGN_EXTEND
)
3940 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
))
3942 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
3944 /* If we're requesting the lowpart of a zero or sign extension,
3945 there are three possibilities. If the outermode is the same
3946 as the origmode, we can omit both the extension and the subreg.
3947 If the outermode is not larger than the origmode, we can apply
3948 the truncation without the extension. Finally, if the outermode
3949 is larger than the origmode, but both are integer modes, we
3950 can just extend to the appropriate mode. */
3953 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
3954 if (outermode
== origmode
)
3955 return XEXP (op
, 0);
3956 if (GET_MODE_BITSIZE (outermode
) <= GET_MODE_BITSIZE (origmode
))
3957 return simplify_gen_subreg (outermode
, XEXP (op
, 0), origmode
,
3958 subreg_lowpart_offset (outermode
,
3960 if (SCALAR_INT_MODE_P (outermode
))
3961 return simplify_gen_unary (GET_CODE (op
), outermode
,
3962 XEXP (op
, 0), origmode
);
      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
3967 if (GET_CODE (op
) == ZERO_EXTEND
3968 && bitpos
>= GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0))))
3969 return CONST0_RTX (outermode
);
3972 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
3973 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3974 the outer subreg is effectively a truncation to the original mode. */
3975 if ((GET_CODE (op
) == LSHIFTRT
3976 || GET_CODE (op
) == ASHIFTRT
)
3977 && SCALAR_INT_MODE_P (outermode
)
3978 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
3979 to avoid the possibility that an outer LSHIFTRT shifts by more
3980 than the sign extension's sign_bit_copies and introduces zeros
3981 into the high bits of the result. */
3982 && (2 * GET_MODE_BITSIZE (outermode
)) <= GET_MODE_BITSIZE (innermode
)
3983 && GET_CODE (XEXP (op
, 1)) == CONST_INT
3984 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
3985 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
3986 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
3987 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
3988 return simplify_gen_binary (ASHIFTRT
, outermode
,
3989 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
3991 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
3992 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3993 the outer subreg is effectively a truncation to the original mode. */
3994 if ((GET_CODE (op
) == LSHIFTRT
3995 || GET_CODE (op
) == ASHIFTRT
)
3996 && SCALAR_INT_MODE_P (outermode
)
3997 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
3998 && GET_CODE (XEXP (op
, 1)) == CONST_INT
3999 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
4000 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
4001 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
4002 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
4003 return simplify_gen_binary (LSHIFTRT
, outermode
,
4004 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
4006 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4007 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4008 the outer subreg is effectively a truncation to the original mode. */
4009 if (GET_CODE (op
) == ASHIFT
4010 && SCALAR_INT_MODE_P (outermode
)
4011 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
)
4012 && GET_CODE (XEXP (op
, 1)) == CONST_INT
4013 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
4014 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
4015 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == outermode
4016 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (outermode
)
4017 && subreg_lsb_1 (outermode
, innermode
, byte
) == 0)
4018 return simplify_gen_binary (ASHIFT
, outermode
,
4019 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
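
/* Typical use (illustrative sketch only): asking for an SImode piece of a
   DImode value with

       part = simplify_gen_subreg (SImode, op, DImode, byte);

   returns a folded constant when OP is a CONST_INT/CONST_DOUBLE, and
   otherwise a (subreg:SI ...) when validate_subreg accepts the offset;
   which BYTE selects the low part depends on the target's endianness.  */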
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   unnecessary.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))