/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011, 2012 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
37 #include "diagnostic-core.h"
41 /* Simplification and canonicalization of RTL. */
43 /* Much code operates on (low, high) pairs; the low value is an
44 unsigned wide int, the high value a signed wide int. We
45 occasionally need to sign extend from low to high as if low were a
47 #define HWI_SIGN_EXTEND(low) \
48 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
50 static rtx
neg_const_int (enum machine_mode
, const_rtx
);
51 static bool plus_minus_operand_p (const_rtx
);
52 static bool simplify_plus_minus_op_data_cmp (rtx
, rtx
);
53 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
, rtx
);
54 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
56 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
58 static rtx
simplify_relational_operation_1 (enum rtx_code
, enum machine_mode
,
59 enum machine_mode
, rtx
, rtx
);
60 static rtx
simplify_unary_operation_1 (enum rtx_code
, enum machine_mode
, rtx
);
61 static rtx
simplify_binary_operation_1 (enum rtx_code
, enum machine_mode
,
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
67 neg_const_int (enum machine_mode mode
, const_rtx i
)
69 return gen_int_mode (-(unsigned HOST_WIDE_INT
) INTVAL (i
), mode
);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
76 mode_signbit_p (enum machine_mode mode
, const_rtx x
)
78 unsigned HOST_WIDE_INT val
;
81 if (GET_MODE_CLASS (mode
) != MODE_INT
)
84 width
= GET_MODE_PRECISION (mode
);
88 if (width
<= HOST_BITS_PER_WIDE_INT
91 else if (width
<= HOST_BITS_PER_DOUBLE_INT
92 && CONST_DOUBLE_AS_INT_P (x
)
93 && CONST_DOUBLE_LOW (x
) == 0)
95 val
= CONST_DOUBLE_HIGH (x
);
96 width
-= HOST_BITS_PER_WIDE_INT
;
99 /* FIXME: We don't yet have a representation for wider modes. */
102 if (width
< HOST_BITS_PER_WIDE_INT
)
103 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
104 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
107 /* Test whether VAL is equal to the most significant bit of mode MODE
108 (after masking with the mode mask of MODE). Returns false if the
109 precision of MODE is too large to handle. */
112 val_signbit_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
116 if (GET_MODE_CLASS (mode
) != MODE_INT
)
119 width
= GET_MODE_PRECISION (mode
);
120 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
123 val
&= GET_MODE_MASK (mode
);
124 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
127 /* Test whether the most significant bit of mode MODE is set in VAL.
128 Returns false if the precision of MODE is too large to handle. */
130 val_signbit_known_set_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
134 if (GET_MODE_CLASS (mode
) != MODE_INT
)
137 width
= GET_MODE_PRECISION (mode
);
138 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
141 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
145 /* Test whether the most significant bit of mode MODE is clear in VAL.
146 Returns false if the precision of MODE is too large to handle. */
148 val_signbit_known_clear_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
152 if (GET_MODE_CLASS (mode
) != MODE_INT
)
155 width
= GET_MODE_PRECISION (mode
);
156 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
159 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
163 /* Make a binary operation by properly ordering the operands and
164 seeing if the expression folds. */
167 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
172 /* If this simplifies, do it. */
173 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
177 /* Put complex operands first and constants second if commutative. */
178 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
179 && swap_commutative_operands_p (op0
, op1
))
180 tem
= op0
, op0
= op1
, op1
= tem
;
182 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
185 /* If X is a MEM referencing the constant pool, return the real value.
186 Otherwise return X. */
188 avoid_constant_pool_reference (rtx x
)
191 enum machine_mode cmode
;
192 HOST_WIDE_INT offset
= 0;
194 switch (GET_CODE (x
))
200 /* Handle float extensions of constant pool references. */
202 c
= avoid_constant_pool_reference (tmp
);
203 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
207 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
208 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
216 if (GET_MODE (x
) == BLKmode
)
221 /* Call target hook to avoid the effects of -fpic etc.... */
222 addr
= targetm
.delegitimize_address (addr
);
224 /* Split the address into a base and integer offset. */
225 if (GET_CODE (addr
) == CONST
226 && GET_CODE (XEXP (addr
, 0)) == PLUS
227 && CONST_INT_P (XEXP (XEXP (addr
, 0), 1)))
229 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
230 addr
= XEXP (XEXP (addr
, 0), 0);
233 if (GET_CODE (addr
) == LO_SUM
)
234 addr
= XEXP (addr
, 1);
236 /* If this is a constant pool reference, we can turn it into its
237 constant and hope that simplifications happen. */
238 if (GET_CODE (addr
) == SYMBOL_REF
239 && CONSTANT_POOL_ADDRESS_P (addr
))
241 c
= get_pool_constant (addr
);
242 cmode
= get_pool_mode (addr
);
244 /* If we're accessing the constant in a different mode than it was
245 originally stored, attempt to fix that up via subreg simplifications.
246 If that fails we have no choice but to return the original memory. */
247 if (offset
!= 0 || cmode
!= GET_MODE (x
))
249 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
250 if (tem
&& CONSTANT_P (tem
))
260 /* Simplify a MEM based on its attributes. This is the default
261 delegitimize_address target hook, and it's recommended that every
262 overrider call it. */
265 delegitimize_mem_from_attrs (rtx x
)
267 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
268 use their base addresses as equivalent. */
271 && MEM_OFFSET_KNOWN_P (x
))
273 tree decl
= MEM_EXPR (x
);
274 enum machine_mode mode
= GET_MODE (x
);
275 HOST_WIDE_INT offset
= 0;
277 switch (TREE_CODE (decl
))
287 case ARRAY_RANGE_REF
:
292 case VIEW_CONVERT_EXPR
:
294 HOST_WIDE_INT bitsize
, bitpos
;
296 int unsignedp
, volatilep
= 0;
298 decl
= get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
,
299 &mode
, &unsignedp
, &volatilep
, false);
300 if (bitsize
!= GET_MODE_BITSIZE (mode
)
301 || (bitpos
% BITS_PER_UNIT
)
302 || (toffset
&& !host_integerp (toffset
, 0)))
306 offset
+= bitpos
/ BITS_PER_UNIT
;
308 offset
+= TREE_INT_CST_LOW (toffset
);
315 && mode
== GET_MODE (x
)
316 && TREE_CODE (decl
) == VAR_DECL
317 && (TREE_STATIC (decl
)
318 || DECL_THREAD_LOCAL_P (decl
))
319 && DECL_RTL_SET_P (decl
)
320 && MEM_P (DECL_RTL (decl
)))
324 offset
+= MEM_OFFSET (x
);
326 newx
= DECL_RTL (decl
);
330 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
332 /* Avoid creating a new MEM needlessly if we already had
333 the same address. We do if there's no OFFSET and the
334 old address X is identical to NEWX, or if X is of the
335 form (plus NEWX OFFSET), or the NEWX is of the form
336 (plus Y (const_int Z)) and X is that with the offset
337 added: (plus Y (const_int Z+OFFSET)). */
339 || (GET_CODE (o
) == PLUS
340 && GET_CODE (XEXP (o
, 1)) == CONST_INT
341 && (offset
== INTVAL (XEXP (o
, 1))
342 || (GET_CODE (n
) == PLUS
343 && GET_CODE (XEXP (n
, 1)) == CONST_INT
344 && (INTVAL (XEXP (n
, 1)) + offset
345 == INTVAL (XEXP (o
, 1)))
346 && (n
= XEXP (n
, 0))))
347 && (o
= XEXP (o
, 0))))
348 && rtx_equal_p (o
, n
)))
349 x
= adjust_address_nv (newx
, mode
, offset
);
351 else if (GET_MODE (x
) == GET_MODE (newx
)
360 /* Make a unary operation by first seeing if it folds and otherwise making
361 the specified operation. */
364 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
365 enum machine_mode op_mode
)
369 /* If this simplifies, use it. */
370 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
373 return gen_rtx_fmt_e (code
, mode
, op
);
376 /* Likewise for ternary operations. */
379 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
380 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
384 /* If this simplifies, use it. */
385 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
389 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
392 /* Likewise, for relational operations.
393 CMP_MODE specifies mode comparison is done in. */
396 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
397 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
401 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
405 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
408 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
409 and simplify the result. If FN is non-NULL, call this callback on each
410 X, if it returns non-NULL, replace X with its return value and simplify the
414 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
415 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
417 enum rtx_code code
= GET_CODE (x
);
418 enum machine_mode mode
= GET_MODE (x
);
419 enum machine_mode op_mode
;
421 rtx op0
, op1
, op2
, newx
, op
;
425 if (__builtin_expect (fn
!= NULL
, 0))
427 newx
= fn (x
, old_rtx
, data
);
431 else if (rtx_equal_p (x
, old_rtx
))
432 return copy_rtx ((rtx
) data
);
434 switch (GET_RTX_CLASS (code
))
438 op_mode
= GET_MODE (op0
);
439 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
440 if (op0
== XEXP (x
, 0))
442 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
446 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
447 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
448 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
450 return simplify_gen_binary (code
, mode
, op0
, op1
);
453 case RTX_COMM_COMPARE
:
456 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
457 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
458 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
459 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
461 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
464 case RTX_BITFIELD_OPS
:
466 op_mode
= GET_MODE (op0
);
467 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
468 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
469 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
470 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
472 if (op_mode
== VOIDmode
)
473 op_mode
= GET_MODE (op0
);
474 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
479 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
480 if (op0
== SUBREG_REG (x
))
482 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
483 GET_MODE (SUBREG_REG (x
)),
485 return op0
? op0
: x
;
492 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
493 if (op0
== XEXP (x
, 0))
495 return replace_equiv_address_nv (x
, op0
);
497 else if (code
== LO_SUM
)
499 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
500 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
502 /* (lo_sum (high x) x) -> x */
503 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
506 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
508 return gen_rtx_LO_SUM (mode
, op0
, op1
);
517 fmt
= GET_RTX_FORMAT (code
);
518 for (i
= 0; fmt
[i
]; i
++)
523 newvec
= XVEC (newx
, i
);
524 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
526 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
528 if (op
!= RTVEC_ELT (vec
, j
))
532 newvec
= shallow_copy_rtvec (vec
);
534 newx
= shallow_copy_rtx (x
);
535 XVEC (newx
, i
) = newvec
;
537 RTVEC_ELT (newvec
, j
) = op
;
545 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
546 if (op
!= XEXP (x
, i
))
549 newx
= shallow_copy_rtx (x
);
558 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
559 resulting RTX. Return a new RTX which is as simplified as possible. */
562 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
564 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
567 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
568 Only handle cases where the truncated value is inherently an rvalue.
570 RTL provides two ways of truncating a value:
572 1. a lowpart subreg. This form is only a truncation when both
573 the outer and inner modes (here MODE and OP_MODE respectively)
574 are scalar integers, and only then when the subreg is used as
577 It is only valid to form such truncating subregs if the
578 truncation requires no action by the target. The onus for
579 proving this is on the creator of the subreg -- e.g. the
580 caller to simplify_subreg or simplify_gen_subreg -- and typically
581 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
583 2. a TRUNCATE. This form handles both scalar and compound integers.
585 The first form is preferred where valid. However, the TRUNCATE
586 handling in simplify_unary_operation turns the second form into the
587 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
588 so it is generally safe to form rvalue truncations using:
590 simplify_gen_unary (TRUNCATE, ...)
592 and leave simplify_unary_operation to work out which representation
595 Because of the proof requirements on (1), simplify_truncation must
596 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
597 regardless of whether the outer truncation came from a SUBREG or a
598 TRUNCATE. For example, if the caller has proven that an SImode
603 is a no-op and can be represented as a subreg, it does not follow
604 that SImode truncations of X and Y are also no-ops. On a target
605 like 64-bit MIPS that requires SImode values to be stored in
606 sign-extended form, an SImode truncation of:
608 (and:DI (reg:DI X) (const_int 63))
610 is trivially a no-op because only the lower 6 bits can be set.
611 However, X is still an arbitrary 64-bit number and so we cannot
612 assume that truncating it too is a no-op. */
615 simplify_truncation (enum machine_mode mode
, rtx op
,
616 enum machine_mode op_mode
)
618 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
619 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
620 gcc_assert (precision
<= op_precision
);
622 /* Optimize truncations of zero and sign extended values. */
623 if (GET_CODE (op
) == ZERO_EXTEND
624 || GET_CODE (op
) == SIGN_EXTEND
)
626 /* There are three possibilities. If MODE is the same as the
627 origmode, we can omit both the extension and the subreg.
628 If MODE is not larger than the origmode, we can apply the
629 truncation without the extension. Finally, if the outermode
630 is larger than the origmode, we can just extend to the appropriate
632 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
633 if (mode
== origmode
)
635 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
636 return simplify_gen_unary (TRUNCATE
, mode
,
637 XEXP (op
, 0), origmode
);
639 return simplify_gen_unary (GET_CODE (op
), mode
,
640 XEXP (op
, 0), origmode
);
643 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
644 to (op:SI (truncate:SI (x:DI)) (truncate:SI (x:DI))). */
645 if (GET_CODE (op
) == PLUS
646 || GET_CODE (op
) == MINUS
647 || GET_CODE (op
) == MULT
)
649 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
652 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
654 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
658 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
659 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
660 the outer subreg is effectively a truncation to the original mode. */
661 if ((GET_CODE (op
) == LSHIFTRT
662 || GET_CODE (op
) == ASHIFTRT
)
663 /* Ensure that OP_MODE is at least twice as wide as MODE
664 to avoid the possibility that an outer LSHIFTRT shifts by more
665 than the sign extension's sign_bit_copies and introduces zeros
666 into the high bits of the result. */
667 && 2 * precision
<= op_precision
668 && CONST_INT_P (XEXP (op
, 1))
669 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
670 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
671 && UINTVAL (XEXP (op
, 1)) < precision
)
672 return simplify_gen_binary (ASHIFTRT
, mode
,
673 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
675 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
676 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
677 the outer subreg is effectively a truncation to the original mode. */
678 if ((GET_CODE (op
) == LSHIFTRT
679 || GET_CODE (op
) == ASHIFTRT
)
680 && CONST_INT_P (XEXP (op
, 1))
681 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
682 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
683 && UINTVAL (XEXP (op
, 1)) < precision
)
684 return simplify_gen_binary (LSHIFTRT
, mode
,
685 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
687 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
688 to (ashift:QI (x:QI) C), where C is a suitable small constant and
689 the outer subreg is effectively a truncation to the original mode. */
690 if (GET_CODE (op
) == ASHIFT
691 && CONST_INT_P (XEXP (op
, 1))
692 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
693 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
694 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
695 && UINTVAL (XEXP (op
, 1)) < precision
)
696 return simplify_gen_binary (ASHIFT
, mode
,
697 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
699 /* Recognize a word extraction from a multi-word subreg. */
700 if ((GET_CODE (op
) == LSHIFTRT
701 || GET_CODE (op
) == ASHIFTRT
)
702 && SCALAR_INT_MODE_P (mode
)
703 && SCALAR_INT_MODE_P (op_mode
)
704 && precision
>= BITS_PER_WORD
705 && 2 * precision
<= op_precision
706 && CONST_INT_P (XEXP (op
, 1))
707 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
708 && UINTVAL (XEXP (op
, 1)) < op_precision
)
710 int byte
= subreg_lowpart_offset (mode
, op_mode
);
711 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
712 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
714 ? byte
- shifted_bytes
715 : byte
+ shifted_bytes
));
718 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
719 and try replacing the TRUNCATE and shift with it. Don't do this
720 if the MEM has a mode-dependent address. */
721 if ((GET_CODE (op
) == LSHIFTRT
722 || GET_CODE (op
) == ASHIFTRT
)
723 && SCALAR_INT_MODE_P (op_mode
)
724 && MEM_P (XEXP (op
, 0))
725 && CONST_INT_P (XEXP (op
, 1))
726 && (INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (mode
)) == 0
727 && INTVAL (XEXP (op
, 1)) > 0
728 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (op_mode
)
729 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
730 MEM_ADDR_SPACE (XEXP (op
, 0)))
731 && ! MEM_VOLATILE_P (XEXP (op
, 0))
732 && (GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
733 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
735 int byte
= subreg_lowpart_offset (mode
, op_mode
);
736 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
737 return adjust_address_nv (XEXP (op
, 0), mode
,
739 ? byte
- shifted_bytes
740 : byte
+ shifted_bytes
));
743 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
744 (OP:SI foo:SI) if OP is NEG or ABS. */
745 if ((GET_CODE (op
) == ABS
746 || GET_CODE (op
) == NEG
)
747 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
748 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
749 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
750 return simplify_gen_unary (GET_CODE (op
), mode
,
751 XEXP (XEXP (op
, 0), 0), mode
);
753 /* (truncate:A (subreg:B (truncate:C X) 0)) is
755 if (GET_CODE (op
) == SUBREG
756 && SCALAR_INT_MODE_P (mode
)
757 && SCALAR_INT_MODE_P (op_mode
)
758 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op
)))
759 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
760 && subreg_lowpart_p (op
))
761 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (SUBREG_REG (op
), 0),
762 GET_MODE (XEXP (SUBREG_REG (op
), 0)));
764 /* (truncate:A (truncate:B X)) is (truncate:A X). */
765 if (GET_CODE (op
) == TRUNCATE
)
766 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
767 GET_MODE (XEXP (op
, 0)));
772 /* Try to simplify a unary operation CODE whose output mode is to be
773 MODE with input operand OP whose mode was originally OP_MODE.
774 Return zero if no simplification can be made. */
776 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
777 rtx op
, enum machine_mode op_mode
)
781 trueop
= avoid_constant_pool_reference (op
);
783 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
787 return simplify_unary_operation_1 (code
, mode
, op
);
790 /* Perform some simplifications we can do even if the operands
793 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
795 enum rtx_code reversed
;
801 /* (not (not X)) == X. */
802 if (GET_CODE (op
) == NOT
)
805 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
806 comparison is all ones. */
807 if (COMPARISON_P (op
)
808 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
809 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
810 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
811 XEXP (op
, 0), XEXP (op
, 1));
813 /* (not (plus X -1)) can become (neg X). */
814 if (GET_CODE (op
) == PLUS
815 && XEXP (op
, 1) == constm1_rtx
)
816 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
818 /* Similarly, (not (neg X)) is (plus X -1). */
819 if (GET_CODE (op
) == NEG
)
820 return plus_constant (mode
, XEXP (op
, 0), -1);
822 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
823 if (GET_CODE (op
) == XOR
824 && CONST_INT_P (XEXP (op
, 1))
825 && (temp
= simplify_unary_operation (NOT
, mode
,
826 XEXP (op
, 1), mode
)) != 0)
827 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
829 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
830 if (GET_CODE (op
) == PLUS
831 && CONST_INT_P (XEXP (op
, 1))
832 && mode_signbit_p (mode
, XEXP (op
, 1))
833 && (temp
= simplify_unary_operation (NOT
, mode
,
834 XEXP (op
, 1), mode
)) != 0)
835 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
838 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
839 operands other than 1, but that is not valid. We could do a
840 similar simplification for (not (lshiftrt C X)) where C is
841 just the sign bit, but this doesn't seem common enough to
843 if (GET_CODE (op
) == ASHIFT
844 && XEXP (op
, 0) == const1_rtx
)
846 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
847 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
850 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
851 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
852 so we can perform the above simplification. */
854 if (STORE_FLAG_VALUE
== -1
855 && GET_CODE (op
) == ASHIFTRT
856 && GET_CODE (XEXP (op
, 1))
857 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
858 return simplify_gen_relational (GE
, mode
, VOIDmode
,
859 XEXP (op
, 0), const0_rtx
);
862 if (GET_CODE (op
) == SUBREG
863 && subreg_lowpart_p (op
)
864 && (GET_MODE_SIZE (GET_MODE (op
))
865 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
866 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
867 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
869 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
872 x
= gen_rtx_ROTATE (inner_mode
,
873 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
875 XEXP (SUBREG_REG (op
), 1));
876 return rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
879 /* Apply De Morgan's laws to reduce number of patterns for machines
880 with negating logical insns (and-not, nand, etc.). If result has
881 only one NOT, put it first, since that is how the patterns are
884 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
886 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
887 enum machine_mode op_mode
;
889 op_mode
= GET_MODE (in1
);
890 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
892 op_mode
= GET_MODE (in2
);
893 if (op_mode
== VOIDmode
)
895 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
897 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
900 in2
= in1
; in1
= tem
;
903 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
909 /* (neg (neg X)) == X. */
910 if (GET_CODE (op
) == NEG
)
913 /* (neg (plus X 1)) can become (not X). */
914 if (GET_CODE (op
) == PLUS
915 && XEXP (op
, 1) == const1_rtx
)
916 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
918 /* Similarly, (neg (not X)) is (plus X 1). */
919 if (GET_CODE (op
) == NOT
)
920 return plus_constant (mode
, XEXP (op
, 0), 1);
922 /* (neg (minus X Y)) can become (minus Y X). This transformation
923 isn't safe for modes with signed zeros, since if X and Y are
924 both +0, (minus Y X) is the same as (minus X Y). If the
925 rounding mode is towards +infinity (or -infinity) then the two
926 expressions will be rounded differently. */
927 if (GET_CODE (op
) == MINUS
928 && !HONOR_SIGNED_ZEROS (mode
)
929 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
930 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
932 if (GET_CODE (op
) == PLUS
933 && !HONOR_SIGNED_ZEROS (mode
)
934 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
936 /* (neg (plus A C)) is simplified to (minus -C A). */
937 if (CONST_INT_P (XEXP (op
, 1))
938 || CONST_DOUBLE_P (XEXP (op
, 1)))
940 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
942 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
945 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
946 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
947 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
950 /* (neg (mult A B)) becomes (mult A (neg B)).
951 This works even for floating-point values. */
952 if (GET_CODE (op
) == MULT
953 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
955 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
956 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
959 /* NEG commutes with ASHIFT since it is multiplication. Only do
960 this if we can then eliminate the NEG (e.g., if the operand
962 if (GET_CODE (op
) == ASHIFT
)
964 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
966 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
969 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
970 C is equal to the width of MODE minus 1. */
971 if (GET_CODE (op
) == ASHIFTRT
972 && CONST_INT_P (XEXP (op
, 1))
973 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
974 return simplify_gen_binary (LSHIFTRT
, mode
,
975 XEXP (op
, 0), XEXP (op
, 1));
977 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
978 C is equal to the width of MODE minus 1. */
979 if (GET_CODE (op
) == LSHIFTRT
980 && CONST_INT_P (XEXP (op
, 1))
981 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
982 return simplify_gen_binary (ASHIFTRT
, mode
,
983 XEXP (op
, 0), XEXP (op
, 1));
985 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
986 if (GET_CODE (op
) == XOR
987 && XEXP (op
, 1) == const1_rtx
988 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
989 return plus_constant (mode
, XEXP (op
, 0), -1);
991 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
992 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
993 if (GET_CODE (op
) == LT
994 && XEXP (op
, 1) == const0_rtx
995 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
997 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
998 int isize
= GET_MODE_PRECISION (inner
);
999 if (STORE_FLAG_VALUE
== 1)
1001 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1002 GEN_INT (isize
- 1));
1005 if (GET_MODE_PRECISION (mode
) > isize
)
1006 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
1007 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1009 else if (STORE_FLAG_VALUE
== -1)
1011 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1012 GEN_INT (isize
- 1));
1015 if (GET_MODE_PRECISION (mode
) > isize
)
1016 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
1017 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1023 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1024 with the umulXi3_highpart patterns. */
1025 if (GET_CODE (op
) == LSHIFTRT
1026 && GET_CODE (XEXP (op
, 0)) == MULT
)
1029 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1031 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1032 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1033 /* We can't handle truncation to a partial integer mode here
1034 because we don't know the real bitsize of the partial
1039 if (GET_MODE (op
) != VOIDmode
)
1041 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1046 /* If we know that the value is already truncated, we can
1047 replace the TRUNCATE with a SUBREG. */
1048 if (GET_MODE_NUNITS (mode
) == 1
1049 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1050 || truncated_to_mode (mode
, op
)))
1051 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1053 /* A truncate of a comparison can be replaced with a subreg if
1054 STORE_FLAG_VALUE permits. This is like the previous test,
1055 but it works even if the comparison is done in a mode larger
1056 than HOST_BITS_PER_WIDE_INT. */
1057 if (HWI_COMPUTABLE_MODE_P (mode
)
1058 && COMPARISON_P (op
)
1059 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1060 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1062 /* A truncate of a memory is just loading the low part of the memory
1063 if we are not changing the meaning of the address. */
1064 if (GET_CODE (op
) == MEM
1065 && !VECTOR_MODE_P (mode
)
1066 && !MEM_VOLATILE_P (op
)
1067 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1068 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1072 case FLOAT_TRUNCATE
:
1073 if (DECIMAL_FLOAT_MODE_P (mode
))
1076 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1077 if (GET_CODE (op
) == FLOAT_EXTEND
1078 && GET_MODE (XEXP (op
, 0)) == mode
)
1079 return XEXP (op
, 0);
1081 /* (float_truncate:SF (float_truncate:DF foo:XF))
1082 = (float_truncate:SF foo:XF).
1083 This may eliminate double rounding, so it is unsafe.
1085 (float_truncate:SF (float_extend:XF foo:DF))
1086 = (float_truncate:SF foo:DF).
1088 (float_truncate:DF (float_extend:XF foo:SF))
1089 = (float_extend:SF foo:DF). */
1090 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1091 && flag_unsafe_math_optimizations
)
1092 || GET_CODE (op
) == FLOAT_EXTEND
)
1093 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1095 > GET_MODE_SIZE (mode
)
1096 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1098 XEXP (op
, 0), mode
);
1100 /* (float_truncate (float x)) is (float x) */
1101 if (GET_CODE (op
) == FLOAT
1102 && (flag_unsafe_math_optimizations
1103 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1104 && ((unsigned)significand_size (GET_MODE (op
))
1105 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1106 - num_sign_bit_copies (XEXP (op
, 0),
1107 GET_MODE (XEXP (op
, 0))))))))
1108 return simplify_gen_unary (FLOAT
, mode
,
1110 GET_MODE (XEXP (op
, 0)));
1112 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1113 (OP:SF foo:SF) if OP is NEG or ABS. */
1114 if ((GET_CODE (op
) == ABS
1115 || GET_CODE (op
) == NEG
)
1116 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1117 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1118 return simplify_gen_unary (GET_CODE (op
), mode
,
1119 XEXP (XEXP (op
, 0), 0), mode
);
1121 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1122 is (float_truncate:SF x). */
1123 if (GET_CODE (op
) == SUBREG
1124 && subreg_lowpart_p (op
)
1125 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1126 return SUBREG_REG (op
);
1130 if (DECIMAL_FLOAT_MODE_P (mode
))
1133 /* (float_extend (float_extend x)) is (float_extend x)
1135 (float_extend (float x)) is (float x) assuming that double
1136 rounding can't happen.
1138 if (GET_CODE (op
) == FLOAT_EXTEND
1139 || (GET_CODE (op
) == FLOAT
1140 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1141 && ((unsigned)significand_size (GET_MODE (op
))
1142 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1143 - num_sign_bit_copies (XEXP (op
, 0),
1144 GET_MODE (XEXP (op
, 0)))))))
1145 return simplify_gen_unary (GET_CODE (op
), mode
,
1147 GET_MODE (XEXP (op
, 0)));
1152 /* (abs (neg <foo>)) -> (abs <foo>) */
1153 if (GET_CODE (op
) == NEG
)
1154 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1155 GET_MODE (XEXP (op
, 0)));
1157 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1159 if (GET_MODE (op
) == VOIDmode
)
1162 /* If operand is something known to be positive, ignore the ABS. */
1163 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1164 || val_signbit_known_clear_p (GET_MODE (op
),
1165 nonzero_bits (op
, GET_MODE (op
))))
1168 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1169 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
1170 return gen_rtx_NEG (mode
, op
);
1175 /* (ffs (*_extend <X>)) = (ffs <X>) */
1176 if (GET_CODE (op
) == SIGN_EXTEND
1177 || GET_CODE (op
) == ZERO_EXTEND
)
1178 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1179 GET_MODE (XEXP (op
, 0)));
1183 switch (GET_CODE (op
))
1187 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1188 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1189 GET_MODE (XEXP (op
, 0)));
1193 /* Rotations don't affect popcount. */
1194 if (!side_effects_p (XEXP (op
, 1)))
1195 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1196 GET_MODE (XEXP (op
, 0)));
1205 switch (GET_CODE (op
))
1211 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1212 GET_MODE (XEXP (op
, 0)));
1216 /* Rotations don't affect parity. */
1217 if (!side_effects_p (XEXP (op
, 1)))
1218 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1219 GET_MODE (XEXP (op
, 0)));
1228 /* (bswap (bswap x)) -> x. */
1229 if (GET_CODE (op
) == BSWAP
)
1230 return XEXP (op
, 0);
1234 /* (float (sign_extend <X>)) = (float <X>). */
1235 if (GET_CODE (op
) == SIGN_EXTEND
)
1236 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1237 GET_MODE (XEXP (op
, 0)));
1241 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1242 becomes just the MINUS if its mode is MODE. This allows
1243 folding switch statements on machines using casesi (such as
1245 if (GET_CODE (op
) == TRUNCATE
1246 && GET_MODE (XEXP (op
, 0)) == mode
1247 && GET_CODE (XEXP (op
, 0)) == MINUS
1248 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1249 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1250 return XEXP (op
, 0);
1252 /* Extending a widening multiplication should be canonicalized to
1253 a wider widening multiplication. */
1254 if (GET_CODE (op
) == MULT
)
1256 rtx lhs
= XEXP (op
, 0);
1257 rtx rhs
= XEXP (op
, 1);
1258 enum rtx_code lcode
= GET_CODE (lhs
);
1259 enum rtx_code rcode
= GET_CODE (rhs
);
1261 /* Widening multiplies usually extend both operands, but sometimes
1262 they use a shift to extract a portion of a register. */
1263 if ((lcode
== SIGN_EXTEND
1264 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1265 && (rcode
== SIGN_EXTEND
1266 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1268 enum machine_mode lmode
= GET_MODE (lhs
);
1269 enum machine_mode rmode
= GET_MODE (rhs
);
1272 if (lcode
== ASHIFTRT
)
1273 /* Number of bits not shifted off the end. */
1274 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1275 else /* lcode == SIGN_EXTEND */
1276 /* Size of inner mode. */
1277 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1279 if (rcode
== ASHIFTRT
)
1280 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1281 else /* rcode == SIGN_EXTEND */
1282 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1284 /* We can only widen multiplies if the result is mathematiclly
1285 equivalent. I.e. if overflow was impossible. */
1286 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1287 return simplify_gen_binary
1289 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1290 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1294 /* Check for a sign extension of a subreg of a promoted
1295 variable, where the promotion is sign-extended, and the
1296 target mode is the same as the variable's promotion. */
1297 if (GET_CODE (op
) == SUBREG
1298 && SUBREG_PROMOTED_VAR_P (op
)
1299 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1300 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1301 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1303 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1304 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1305 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1307 gcc_assert (GET_MODE_BITSIZE (mode
)
1308 > GET_MODE_BITSIZE (GET_MODE (op
)));
1309 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1310 GET_MODE (XEXP (op
, 0)));
1313 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1314 is (sign_extend:M (subreg:O <X>)) if there is mode with
1315 GET_MODE_BITSIZE (N) - I bits.
1316 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1317 is similarly (zero_extend:M (subreg:O <X>)). */
1318 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1319 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1320 && CONST_INT_P (XEXP (op
, 1))
1321 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1322 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1324 enum machine_mode tmode
1325 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1326 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1327 gcc_assert (GET_MODE_BITSIZE (mode
)
1328 > GET_MODE_BITSIZE (GET_MODE (op
)));
1329 if (tmode
!= BLKmode
)
1332 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1333 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1334 ? SIGN_EXTEND
: ZERO_EXTEND
,
1335 mode
, inner
, tmode
);
1339 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1340 /* As we do not know which address space the pointer is referring to,
1341 we can do this only if the target does not support different pointer
1342 or address modes depending on the address space. */
1343 if (target_default_pointer_address_modes_p ()
1344 && ! POINTERS_EXTEND_UNSIGNED
1345 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1347 || (GET_CODE (op
) == SUBREG
1348 && REG_P (SUBREG_REG (op
))
1349 && REG_POINTER (SUBREG_REG (op
))
1350 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1351 return convert_memory_address (Pmode
, op
);
1356 /* Check for a zero extension of a subreg of a promoted
1357 variable, where the promotion is zero-extended, and the
1358 target mode is the same as the variable's promotion. */
1359 if (GET_CODE (op
) == SUBREG
1360 && SUBREG_PROMOTED_VAR_P (op
)
1361 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
1362 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1363 return rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1365 /* Extending a widening multiplication should be canonicalized to
1366 a wider widening multiplication. */
1367 if (GET_CODE (op
) == MULT
)
1369 rtx lhs
= XEXP (op
, 0);
1370 rtx rhs
= XEXP (op
, 1);
1371 enum rtx_code lcode
= GET_CODE (lhs
);
1372 enum rtx_code rcode
= GET_CODE (rhs
);
1374 /* Widening multiplies usually extend both operands, but sometimes
1375 they use a shift to extract a portion of a register. */
1376 if ((lcode
== ZERO_EXTEND
1377 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1378 && (rcode
== ZERO_EXTEND
1379 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1381 enum machine_mode lmode
= GET_MODE (lhs
);
1382 enum machine_mode rmode
= GET_MODE (rhs
);
1385 if (lcode
== LSHIFTRT
)
1386 /* Number of bits not shifted off the end. */
1387 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1388 else /* lcode == ZERO_EXTEND */
1389 /* Size of inner mode. */
1390 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1392 if (rcode
== LSHIFTRT
)
1393 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1394 else /* rcode == ZERO_EXTEND */
1395 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1397 /* We can only widen multiplies if the result is mathematiclly
1398 equivalent. I.e. if overflow was impossible. */
1399 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1400 return simplify_gen_binary
1402 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1403 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1407 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1408 if (GET_CODE (op
) == ZERO_EXTEND
)
1409 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1410 GET_MODE (XEXP (op
, 0)));
1412 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1413 is (zero_extend:M (subreg:O <X>)) if there is mode with
1414 GET_MODE_BITSIZE (N) - I bits. */
1415 if (GET_CODE (op
) == LSHIFTRT
1416 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1417 && CONST_INT_P (XEXP (op
, 1))
1418 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1419 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1421 enum machine_mode tmode
1422 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1423 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1424 if (tmode
!= BLKmode
)
1427 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1428 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1432 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1433 /* As we do not know which address space the pointer is referring to,
1434 we can do this only if the target does not support different pointer
1435 or address modes depending on the address space. */
1436 if (target_default_pointer_address_modes_p ()
1437 && POINTERS_EXTEND_UNSIGNED
> 0
1438 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1440 || (GET_CODE (op
) == SUBREG
1441 && REG_P (SUBREG_REG (op
))
1442 && REG_POINTER (SUBREG_REG (op
))
1443 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1444 return convert_memory_address (Pmode
, op
);
1455 /* Try to compute the value of a unary operation CODE whose output mode is to
1456 be MODE with input operand OP whose mode was originally OP_MODE.
1457 Return zero if the value cannot be computed. */
1459 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
1460 rtx op
, enum machine_mode op_mode
)
1462 unsigned int width
= GET_MODE_PRECISION (mode
);
1463 unsigned int op_width
= GET_MODE_PRECISION (op_mode
);
1465 if (code
== VEC_DUPLICATE
)
1467 gcc_assert (VECTOR_MODE_P (mode
));
1468 if (GET_MODE (op
) != VOIDmode
)
1470 if (!VECTOR_MODE_P (GET_MODE (op
)))
1471 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1473 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1476 if (CONST_INT_P (op
) || CONST_DOUBLE_P (op
)
1477 || GET_CODE (op
) == CONST_VECTOR
)
1479 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1480 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1481 rtvec v
= rtvec_alloc (n_elts
);
1484 if (GET_CODE (op
) != CONST_VECTOR
)
1485 for (i
= 0; i
< n_elts
; i
++)
1486 RTVEC_ELT (v
, i
) = op
;
1489 enum machine_mode inmode
= GET_MODE (op
);
1490 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1491 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1493 gcc_assert (in_n_elts
< n_elts
);
1494 gcc_assert ((n_elts
% in_n_elts
) == 0);
1495 for (i
= 0; i
< n_elts
; i
++)
1496 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1498 return gen_rtx_CONST_VECTOR (mode
, v
);
1502 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1504 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1505 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1506 enum machine_mode opmode
= GET_MODE (op
);
1507 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1508 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1509 rtvec v
= rtvec_alloc (n_elts
);
1512 gcc_assert (op_n_elts
== n_elts
);
1513 for (i
= 0; i
< n_elts
; i
++)
1515 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1516 CONST_VECTOR_ELT (op
, i
),
1517 GET_MODE_INNER (opmode
));
1520 RTVEC_ELT (v
, i
) = x
;
1522 return gen_rtx_CONST_VECTOR (mode
, v
);
1525 /* The order of these tests is critical so that, for example, we don't
1526 check the wrong mode (input vs. output) for a conversion operation,
1527 such as FIX. At some point, this should be simplified. */
1529 if (code
== FLOAT
&& (CONST_DOUBLE_AS_INT_P (op
) || CONST_INT_P (op
)))
1531 HOST_WIDE_INT hv
, lv
;
1534 if (CONST_INT_P (op
))
1535 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1537 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1539 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
1540 d
= real_value_truncate (mode
, d
);
1541 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1543 else if (code
== UNSIGNED_FLOAT
1544 && (CONST_DOUBLE_AS_INT_P (op
) || CONST_INT_P (op
)))
1546 HOST_WIDE_INT hv
, lv
;
1549 if (CONST_INT_P (op
))
1550 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1552 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1554 if (op_mode
== VOIDmode
1555 || GET_MODE_PRECISION (op_mode
) > HOST_BITS_PER_DOUBLE_INT
)
1556 /* We should never get a negative number. */
1557 gcc_assert (hv
>= 0);
1558 else if (GET_MODE_PRECISION (op_mode
) <= HOST_BITS_PER_WIDE_INT
)
1559 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1561 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1562 d
= real_value_truncate (mode
, d
);
1563 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1566 if (CONST_INT_P (op
)
1567 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1569 HOST_WIDE_INT arg0
= INTVAL (op
);
1583 val
= (arg0
>= 0 ? arg0
: - arg0
);
1587 arg0
&= GET_MODE_MASK (mode
);
1588 val
= ffs_hwi (arg0
);
1592 arg0
&= GET_MODE_MASK (mode
);
1593 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1596 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 1;
1600 arg0
&= GET_MODE_MASK (mode
);
1602 val
= GET_MODE_PRECISION (mode
) - 1;
1604 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 2;
1606 val
= GET_MODE_PRECISION (mode
) - floor_log2 (~arg0
) - 2;
1610 arg0
&= GET_MODE_MASK (mode
);
1613 /* Even if the value at zero is undefined, we have to come
1614 up with some replacement. Seems good enough. */
1615 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1616 val
= GET_MODE_PRECISION (mode
);
1619 val
= ctz_hwi (arg0
);
1623 arg0
&= GET_MODE_MASK (mode
);
1626 val
++, arg0
&= arg0
- 1;
1630 arg0
&= GET_MODE_MASK (mode
);
1633 val
++, arg0
&= arg0
- 1;
1642 for (s
= 0; s
< width
; s
+= 8)
1644 unsigned int d
= width
- s
- 8;
1645 unsigned HOST_WIDE_INT byte
;
1646 byte
= (arg0
>> s
) & 0xff;
1657 /* When zero-extending a CONST_INT, we need to know its
1659 gcc_assert (op_mode
!= VOIDmode
);
1660 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1662 /* If we were really extending the mode,
1663 we would have to distinguish between zero-extension
1664 and sign-extension. */
1665 gcc_assert (width
== op_width
);
1668 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1669 val
= arg0
& GET_MODE_MASK (op_mode
);
1675 if (op_mode
== VOIDmode
)
1677 op_width
= GET_MODE_PRECISION (op_mode
);
1678 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1680 /* If we were really extending the mode,
1681 we would have to distinguish between zero-extension
1682 and sign-extension. */
1683 gcc_assert (width
== op_width
);
1686 else if (op_width
< HOST_BITS_PER_WIDE_INT
)
1688 val
= arg0
& GET_MODE_MASK (op_mode
);
1689 if (val_signbit_known_set_p (op_mode
, val
))
1690 val
|= ~GET_MODE_MASK (op_mode
);
1698 case FLOAT_TRUNCATE
:
1710 return gen_int_mode (val
, mode
);
1713 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1714 for a DImode operation on a CONST_INT. */
1715 else if (width
<= HOST_BITS_PER_DOUBLE_INT
1716 && (CONST_DOUBLE_AS_INT_P (op
) || CONST_INT_P (op
)))
1718 double_int first
, value
;
1720 if (CONST_DOUBLE_AS_INT_P (op
))
1721 first
= double_int::from_pair (CONST_DOUBLE_HIGH (op
),
1722 CONST_DOUBLE_LOW (op
));
1724 first
= double_int::from_shwi (INTVAL (op
));
1737 if (first
.is_negative ())
1746 value
.low
= ffs_hwi (first
.low
);
1747 else if (first
.high
!= 0)
1748 value
.low
= HOST_BITS_PER_WIDE_INT
+ ffs_hwi (first
.high
);
1755 if (first
.high
!= 0)
1756 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.high
) - 1
1757 - HOST_BITS_PER_WIDE_INT
;
1758 else if (first
.low
!= 0)
1759 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.low
) - 1;
1760 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1761 value
.low
= GET_MODE_PRECISION (mode
);
1767 value
.low
= ctz_hwi (first
.low
);
1768 else if (first
.high
!= 0)
1769 value
.low
= HOST_BITS_PER_WIDE_INT
+ ctz_hwi (first
.high
);
1770 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1771 value
.low
= GET_MODE_PRECISION (mode
);
1775 value
= double_int_zero
;
1779 first
.low
&= first
.low
- 1;
1784 first
.high
&= first
.high
- 1;
1789 value
= double_int_zero
;
1793 first
.low
&= first
.low
- 1;
1798 first
.high
&= first
.high
- 1;
1807 value
= double_int_zero
;
1808 for (s
= 0; s
< width
; s
+= 8)
1810 unsigned int d
= width
- s
- 8;
1811 unsigned HOST_WIDE_INT byte
;
1813 if (s
< HOST_BITS_PER_WIDE_INT
)
1814 byte
= (first
.low
>> s
) & 0xff;
1816 byte
= (first
.high
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1818 if (d
< HOST_BITS_PER_WIDE_INT
)
1819 value
.low
|= byte
<< d
;
1821 value
.high
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1827 /* This is just a change-of-mode, so do nothing. */
1832 gcc_assert (op_mode
!= VOIDmode
);
1834 if (op_width
> HOST_BITS_PER_WIDE_INT
)
1837 value
= double_int::from_uhwi (first
.low
& GET_MODE_MASK (op_mode
));
1841 if (op_mode
== VOIDmode
1842 || op_width
> HOST_BITS_PER_WIDE_INT
)
1846 value
.low
= first
.low
& GET_MODE_MASK (op_mode
);
1847 if (val_signbit_known_set_p (op_mode
, value
.low
))
1848 value
.low
|= ~GET_MODE_MASK (op_mode
);
1850 value
.high
= HWI_SIGN_EXTEND (value
.low
);
1861 return immed_double_int_const (value
, mode
);
1864 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1865 && SCALAR_FLOAT_MODE_P (mode
)
1866 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1868 REAL_VALUE_TYPE d
, t
;
1869 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1874 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1876 real_sqrt (&t
, mode
, &d
);
1880 d
= real_value_abs (&d
);
1883 d
= real_value_negate (&d
);
1885 case FLOAT_TRUNCATE
:
1886 d
= real_value_truncate (mode
, d
);
1889 /* All this does is change the mode, unless changing
1891 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1892 real_convert (&d
, mode
, &d
);
1895 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1902 real_to_target (tmp
, &d
, GET_MODE (op
));
1903 for (i
= 0; i
< 4; i
++)
1905 real_from_target (&d
, tmp
, mode
);
1911 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1914 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1915 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1916 && GET_MODE_CLASS (mode
) == MODE_INT
1917 && width
<= HOST_BITS_PER_DOUBLE_INT
&& width
> 0)
1919 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1920 operators are intentionally left unspecified (to ease implementation
1921 by target backends), for consistency, this routine implements the
1922 same semantics for constant folding as used by the middle-end. */
1924 /* This was formerly used only for non-IEEE float.
1925 eggert@twinsun.com says it is safe for IEEE also. */
1926 HOST_WIDE_INT xh
, xl
, th
, tl
;
1927 REAL_VALUE_TYPE x
, t
;
1928 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1932 if (REAL_VALUE_ISNAN (x
))
1935 /* Test against the signed upper bound. */
1936 if (width
> HOST_BITS_PER_WIDE_INT
)
1938 th
= ((unsigned HOST_WIDE_INT
) 1
1939 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1945 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1947 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1948 if (REAL_VALUES_LESS (t
, x
))
1955 /* Test against the signed lower bound. */
1956 if (width
> HOST_BITS_PER_WIDE_INT
)
1958 th
= (unsigned HOST_WIDE_INT
) (-1)
1959 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1965 tl
= (unsigned HOST_WIDE_INT
) (-1) << (width
- 1);
1967 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1968 if (REAL_VALUES_LESS (x
, t
))
1974 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
1978 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
1981 /* Test against the unsigned upper bound. */
1982 if (width
== HOST_BITS_PER_DOUBLE_INT
)
1987 else if (width
>= HOST_BITS_PER_WIDE_INT
)
1989 th
= ((unsigned HOST_WIDE_INT
) 1
1990 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
1996 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
1998 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
1999 if (REAL_VALUES_LESS (t
, x
))
2006 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2012 return immed_double_const (xl
, xh
, mode
);
2018 /* Subroutine of simplify_binary_operation to simplify a commutative,
2019 associative binary operation CODE with result mode MODE, operating
2020 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2021 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2022 canonicalization is possible. */
2025 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
2030 /* Linearize the operator to the left. */
2031 if (GET_CODE (op1
) == code
)
2033 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2034 if (GET_CODE (op0
) == code
)
2036 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
2037 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
2040 /* "a op (b op c)" becomes "(b op c) op a". */
2041 if (! swap_commutative_operands_p (op1
, op0
))
2042 return simplify_gen_binary (code
, mode
, op1
, op0
);
2049 if (GET_CODE (op0
) == code
)
2051 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2052 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
2054 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
2055 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2058 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2059 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
2061 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
2063 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2064 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
2066 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2073 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2074 and OP1. Return 0 if no simplification is possible.
2076 Don't use this for relational operations such as EQ or LT.
2077 Use simplify_relational_operation instead. */
2079 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2082 rtx trueop0
, trueop1
;
2085 /* Relational operations don't work here. We must know the mode
2086 of the operands in order to do the comparison correctly.
2087 Assuming a full word can give incorrect results.
2088 Consider comparing 128 with -128 in QImode. */
2089 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
2090 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
2092 /* Make sure the constant is second. */
2093 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
2094 && swap_commutative_operands_p (op0
, op1
))
2096 tem
= op0
, op0
= op1
, op1
= tem
;
2099 trueop0
= avoid_constant_pool_reference (op0
);
2100 trueop1
= avoid_constant_pool_reference (op1
);
2102 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
2105 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
2108 /* Subroutine of simplify_binary_operation. Simplify a binary operation
2109 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2110 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2111 actual constants. */
2114 simplify_binary_operation_1 (enum rtx_code code
, enum machine_mode mode
,
2115 rtx op0
, rtx op1
, rtx trueop0
, rtx trueop1
)
2117 rtx tem
, reversed
, opleft
, opright
;
2119 unsigned int width
= GET_MODE_PRECISION (mode
);
2121 /* Even if we can't compute a constant result,
2122 there are some cases worth simplifying. */
2127 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2128 when x is NaN, infinite, or finite and nonzero. They aren't
2129 when x is -0 and the rounding mode is not towards -infinity,
2130 since (-0) + 0 is then 0. */
2131 if (!HONOR_SIGNED_ZEROS (mode
) && trueop1
== CONST0_RTX (mode
))
2134 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2135 transformations are safe even for IEEE. */
2136 if (GET_CODE (op0
) == NEG
)
2137 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
2138 else if (GET_CODE (op1
) == NEG
)
2139 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
2141 /* (~a) + 1 -> -a */
2142 if (INTEGRAL_MODE_P (mode
)
2143 && GET_CODE (op0
) == NOT
2144 && trueop1
== const1_rtx
)
2145 return simplify_gen_unary (NEG
, mode
, XEXP (op0
, 0), mode
);
2147 /* Handle both-operands-constant cases. We can only add
2148 CONST_INTs to constants since the sum of relocatable symbols
2149 can't be handled by most assemblers. Don't add CONST_INT
2150 to CONST_INT since overflow won't be computed properly if wider
2151 than HOST_BITS_PER_WIDE_INT. */
2153 if ((GET_CODE (op0
) == CONST
2154 || GET_CODE (op0
) == SYMBOL_REF
2155 || GET_CODE (op0
) == LABEL_REF
)
2156 && CONST_INT_P (op1
))
2157 return plus_constant (mode
, op0
, INTVAL (op1
));
2158 else if ((GET_CODE (op1
) == CONST
2159 || GET_CODE (op1
) == SYMBOL_REF
2160 || GET_CODE (op1
) == LABEL_REF
)
2161 && CONST_INT_P (op0
))
2162 return plus_constant (mode
, op1
, INTVAL (op0
));
2164 /* See if this is something like X * C - X or vice versa or
2165 if the multiplication is written as a shift. If so, we can
2166 distribute and make a new multiply, shift, or maybe just
2167 have X (if C is 2 in the example above). But don't make
2168 something more expensive than we had before. */
2170 if (SCALAR_INT_MODE_P (mode
))
2172 double_int coeff0
, coeff1
;
2173 rtx lhs
= op0
, rhs
= op1
;
2175 coeff0
= double_int_one
;
2176 coeff1
= double_int_one
;
2178 if (GET_CODE (lhs
) == NEG
)
2180 coeff0
= double_int_minus_one
;
2181 lhs
= XEXP (lhs
, 0);
2183 else if (GET_CODE (lhs
) == MULT
2184 && CONST_INT_P (XEXP (lhs
, 1)))
2186 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2187 lhs
= XEXP (lhs
, 0);
2189 else if (GET_CODE (lhs
) == ASHIFT
2190 && CONST_INT_P (XEXP (lhs
, 1))
2191 && INTVAL (XEXP (lhs
, 1)) >= 0
2192 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2194 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2195 lhs
= XEXP (lhs
, 0);
2198 if (GET_CODE (rhs
) == NEG
)
2200 coeff1
= double_int_minus_one
;
2201 rhs
= XEXP (rhs
, 0);
2203 else if (GET_CODE (rhs
) == MULT
2204 && CONST_INT_P (XEXP (rhs
, 1)))
2206 coeff1
= double_int::from_shwi (INTVAL (XEXP (rhs
, 1)));
2207 rhs
= XEXP (rhs
, 0);
2209 else if (GET_CODE (rhs
) == ASHIFT
2210 && CONST_INT_P (XEXP (rhs
, 1))
2211 && INTVAL (XEXP (rhs
, 1)) >= 0
2212 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2214 coeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2215 rhs
= XEXP (rhs
, 0);
2218 if (rtx_equal_p (lhs
, rhs
))
2220 rtx orig
= gen_rtx_PLUS (mode
, op0
, op1
);
2223 bool speed
= optimize_function_for_speed_p (cfun
);
2225 val
= coeff0
+ coeff1
;
2226 coeff
= immed_double_int_const (val
, mode
);
2228 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2229 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2234 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
2235 if ((CONST_INT_P (op1
) || CONST_DOUBLE_AS_INT_P (op1
))
2236 && GET_CODE (op0
) == XOR
2237 && (CONST_INT_P (XEXP (op0
, 1))
2238 || CONST_DOUBLE_AS_INT_P (XEXP (op0
, 1)))
2239 && mode_signbit_p (mode
, op1
))
2240 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2241 simplify_gen_binary (XOR
, mode
, op1
,
2244 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2245 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2246 && GET_CODE (op0
) == MULT
2247 && GET_CODE (XEXP (op0
, 0)) == NEG
)
2251 in1
= XEXP (XEXP (op0
, 0), 0);
2252 in2
= XEXP (op0
, 1);
2253 return simplify_gen_binary (MINUS
, mode
, op1
,
2254 simplify_gen_binary (MULT
, mode
,
2258 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2259 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2261 if (COMPARISON_P (op0
)
2262 && ((STORE_FLAG_VALUE
== -1 && trueop1
== const1_rtx
)
2263 || (STORE_FLAG_VALUE
== 1 && trueop1
== constm1_rtx
))
2264 && (reversed
= reversed_comparison (op0
, mode
)))
2266 simplify_gen_unary (NEG
, mode
, reversed
, mode
);
2268 /* If one of the operands is a PLUS or a MINUS, see if we can
2269 simplify this by the associative law.
2270 Don't use the associative law for floating point.
2271 The inaccuracy makes it nonassociative,
2272 and subtle programs can break if operations are associated. */
2274 if (INTEGRAL_MODE_P (mode
)
2275 && (plus_minus_operand_p (op0
)
2276 || plus_minus_operand_p (op1
))
2277 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2280 /* Reassociate floating point addition only when the user
2281 specifies associative math operations. */
2282 if (FLOAT_MODE_P (mode
)
2283 && flag_associative_math
)
2285 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2292 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2293 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
2294 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
2295 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
2297 rtx xop00
= XEXP (op0
, 0);
2298 rtx xop10
= XEXP (op1
, 0);
2301 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
2303 if (REG_P (xop00
) && REG_P (xop10
)
2304 && GET_MODE (xop00
) == GET_MODE (xop10
)
2305 && REGNO (xop00
) == REGNO (xop10
)
2306 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
2307 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
2314 /* We can't assume x-x is 0 even with non-IEEE floating point,
2315 but since it is zero except in very strange circumstances, we
2316 will treat it as zero with -ffinite-math-only. */
2317 if (rtx_equal_p (trueop0
, trueop1
)
2318 && ! side_effects_p (op0
)
2319 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2320 return CONST0_RTX (mode
);
2322 /* Change subtraction from zero into negation. (0 - x) is the
2323 same as -x when x is NaN, infinite, or finite and nonzero.
2324 But if the mode has signed zeros, and does not round towards
2325 -infinity, then 0 - 0 is 0, not -0. */
2326 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2327 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2329 /* (-1 - a) is ~a. */
2330 if (trueop0
== constm1_rtx
)
2331 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2333 /* Subtracting 0 has no effect unless the mode has signed zeros
2334 and supports rounding towards -infinity. In such a case,
2336 if (!(HONOR_SIGNED_ZEROS (mode
)
2337 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2338 && trueop1
== CONST0_RTX (mode
))
2341 /* See if this is something like X * C - X or vice versa or
2342 if the multiplication is written as a shift. If so, we can
2343 distribute and make a new multiply, shift, or maybe just
2344 have X (if C is 2 in the example above). But don't make
2345 something more expensive than we had before. */
2347 if (SCALAR_INT_MODE_P (mode
))
2349 double_int coeff0
, negcoeff1
;
2350 rtx lhs
= op0
, rhs
= op1
;
2352 coeff0
= double_int_one
;
2353 negcoeff1
= double_int_minus_one
;
2355 if (GET_CODE (lhs
) == NEG
)
2357 coeff0
= double_int_minus_one
;
2358 lhs
= XEXP (lhs
, 0);
2360 else if (GET_CODE (lhs
) == MULT
2361 && CONST_INT_P (XEXP (lhs
, 1)))
2363 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2364 lhs
= XEXP (lhs
, 0);
2366 else if (GET_CODE (lhs
) == ASHIFT
2367 && CONST_INT_P (XEXP (lhs
, 1))
2368 && INTVAL (XEXP (lhs
, 1)) >= 0
2369 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2371 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2372 lhs
= XEXP (lhs
, 0);
2375 if (GET_CODE (rhs
) == NEG
)
2377 negcoeff1
= double_int_one
;
2378 rhs
= XEXP (rhs
, 0);
2380 else if (GET_CODE (rhs
) == MULT
2381 && CONST_INT_P (XEXP (rhs
, 1)))
2383 negcoeff1
= double_int::from_shwi (-INTVAL (XEXP (rhs
, 1)));
2384 rhs
= XEXP (rhs
, 0);
2386 else if (GET_CODE (rhs
) == ASHIFT
2387 && CONST_INT_P (XEXP (rhs
, 1))
2388 && INTVAL (XEXP (rhs
, 1)) >= 0
2389 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2391 negcoeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2392 negcoeff1
= -negcoeff1
;
2393 rhs
= XEXP (rhs
, 0);
2396 if (rtx_equal_p (lhs
, rhs
))
2398 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2401 bool speed
= optimize_function_for_speed_p (cfun
);
2403 val
= coeff0
+ negcoeff1
;
2404 coeff
= immed_double_int_const (val
, mode
);
2406 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2407 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2412 /* (a - (-b)) -> (a + b). True even for IEEE. */
2413 if (GET_CODE (op1
) == NEG
)
2414 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2416 /* (-x - c) may be simplified as (-c - x). */
2417 if (GET_CODE (op0
) == NEG
2418 && (CONST_INT_P (op1
) || CONST_DOUBLE_P (op1
)))
2420 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2422 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2425 /* Don't let a relocatable value get a negative coeff. */
2426 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2427 return simplify_gen_binary (PLUS
, mode
,
2429 neg_const_int (mode
, op1
));
2431 /* (x - (x & y)) -> (x & ~y) */
2432 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2434 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2436 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2437 GET_MODE (XEXP (op1
, 1)));
2438 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2440 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2442 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2443 GET_MODE (XEXP (op1
, 0)));
2444 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2448 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2449 by reversing the comparison code if valid. */
2450 if (STORE_FLAG_VALUE
== 1
2451 && trueop0
== const1_rtx
2452 && COMPARISON_P (op1
)
2453 && (reversed
= reversed_comparison (op1
, mode
)))
2456 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2457 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2458 && GET_CODE (op1
) == MULT
2459 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2463 in1
= XEXP (XEXP (op1
, 0), 0);
2464 in2
= XEXP (op1
, 1);
2465 return simplify_gen_binary (PLUS
, mode
,
2466 simplify_gen_binary (MULT
, mode
,
2471 /* Canonicalize (minus (neg A) (mult B C)) to
2472 (minus (mult (neg B) C) A). */
2473 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2474 && GET_CODE (op1
) == MULT
2475 && GET_CODE (op0
) == NEG
)
2479 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2480 in2
= XEXP (op1
, 1);
2481 return simplify_gen_binary (MINUS
, mode
,
2482 simplify_gen_binary (MULT
, mode
,
2487 /* If one of the operands is a PLUS or a MINUS, see if we can
2488 simplify this by the associative law. This will, for example,
2489 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2490 Don't use the associative law for floating point.
2491 The inaccuracy makes it nonassociative,
2492 and subtle programs can break if operations are associated. */
2494 if (INTEGRAL_MODE_P (mode
)
2495 && (plus_minus_operand_p (op0
)
2496 || plus_minus_operand_p (op1
))
2497 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2502 if (trueop1
== constm1_rtx
)
2503 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2505 if (GET_CODE (op0
) == NEG
)
2507 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2508 /* If op1 is a MULT as well and simplify_unary_operation
2509 just moved the NEG to the second operand, simplify_gen_binary
2510 below could through simplify_associative_operation move
2511 the NEG around again and recurse endlessly. */
2513 && GET_CODE (op1
) == MULT
2514 && GET_CODE (temp
) == MULT
2515 && XEXP (op1
, 0) == XEXP (temp
, 0)
2516 && GET_CODE (XEXP (temp
, 1)) == NEG
2517 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2520 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2522 if (GET_CODE (op1
) == NEG
)
2524 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2525 /* If op0 is a MULT as well and simplify_unary_operation
2526 just moved the NEG to the second operand, simplify_gen_binary
2527 below could through simplify_associative_operation move
2528 the NEG around again and recurse endlessly. */
2530 && GET_CODE (op0
) == MULT
2531 && GET_CODE (temp
) == MULT
2532 && XEXP (op0
, 0) == XEXP (temp
, 0)
2533 && GET_CODE (XEXP (temp
, 1)) == NEG
2534 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2537 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
2540 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2541 x is NaN, since x * 0 is then also NaN. Nor is it valid
2542 when the mode has signed zeros, since multiplying a negative
2543 number by 0 will give -0, not 0. */
2544 if (!HONOR_NANS (mode
)
2545 && !HONOR_SIGNED_ZEROS (mode
)
2546 && trueop1
== CONST0_RTX (mode
)
2547 && ! side_effects_p (op0
))
2550 /* In IEEE floating point, x*1 is not equivalent to x for
2552 if (!HONOR_SNANS (mode
)
2553 && trueop1
== CONST1_RTX (mode
))
2556 /* Convert multiply by constant power of two into shift unless
2557 we are still generating RTL. This test is a kludge. */
2558 if (CONST_INT_P (trueop1
)
2559 && (val
= exact_log2 (UINTVAL (trueop1
))) >= 0
2560 /* If the mode is larger than the host word size, and the
2561 uppermost bit is set, then this isn't a power of two due
2562 to implicit sign extension. */
2563 && (width
<= HOST_BITS_PER_WIDE_INT
2564 || val
!= HOST_BITS_PER_WIDE_INT
- 1))
2565 return simplify_gen_binary (ASHIFT
, mode
, op0
, GEN_INT (val
));
2567 /* Likewise for multipliers wider than a word. */
2568 if (CONST_DOUBLE_AS_INT_P (trueop1
)
2569 && GET_MODE (op0
) == mode
2570 && CONST_DOUBLE_LOW (trueop1
) == 0
2571 && (val
= exact_log2 (CONST_DOUBLE_HIGH (trueop1
))) >= 0
2572 && (val
< HOST_BITS_PER_DOUBLE_INT
- 1
2573 || GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_DOUBLE_INT
))
2574 return simplify_gen_binary (ASHIFT
, mode
, op0
,
2575 GEN_INT (val
+ HOST_BITS_PER_WIDE_INT
));
2577 /* x*2 is x+x and x*(-1) is -x */
2578 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2579 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2580 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2581 && GET_MODE (op0
) == mode
)
2584 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2586 if (REAL_VALUES_EQUAL (d
, dconst2
))
2587 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2589 if (!HONOR_SNANS (mode
)
2590 && REAL_VALUES_EQUAL (d
, dconstm1
))
2591 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2594 /* Optimize -x * -x as x * x. */
2595 if (FLOAT_MODE_P (mode
)
2596 && GET_CODE (op0
) == NEG
2597 && GET_CODE (op1
) == NEG
2598 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2599 && !side_effects_p (XEXP (op0
, 0)))
2600 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2602 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2603 if (SCALAR_FLOAT_MODE_P (mode
)
2604 && GET_CODE (op0
) == ABS
2605 && GET_CODE (op1
) == ABS
2606 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2607 && !side_effects_p (XEXP (op0
, 0)))
2608 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2610 /* Reassociate multiplication, but for floating point MULTs
2611 only when the user specifies unsafe math optimizations. */
2612 if (! FLOAT_MODE_P (mode
)
2613 || flag_unsafe_math_optimizations
)
2615 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2622 if (trueop1
== CONST0_RTX (mode
))
2624 if (INTEGRAL_MODE_P (mode
)
2625 && trueop1
== CONSTM1_RTX (mode
)
2626 && !side_effects_p (op0
))
2628 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2630 /* A | (~A) -> -1 */
2631 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2632 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2633 && ! side_effects_p (op0
)
2634 && SCALAR_INT_MODE_P (mode
))
2637 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2638 if (CONST_INT_P (op1
)
2639 && HWI_COMPUTABLE_MODE_P (mode
)
2640 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2641 && !side_effects_p (op0
))
2644 /* Canonicalize (X & C1) | C2. */
2645 if (GET_CODE (op0
) == AND
2646 && CONST_INT_P (trueop1
)
2647 && CONST_INT_P (XEXP (op0
, 1)))
2649 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2650 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2651 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2653 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2655 && !side_effects_p (XEXP (op0
, 0)))
2658 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2659 if (((c1
|c2
) & mask
) == mask
)
2660 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2662 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2663 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2665 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2666 gen_int_mode (c1
& ~c2
, mode
));
2667 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2671 /* Convert (A & B) | A to A. */
2672 if (GET_CODE (op0
) == AND
2673 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2674 || rtx_equal_p (XEXP (op0
, 1), op1
))
2675 && ! side_effects_p (XEXP (op0
, 0))
2676 && ! side_effects_p (XEXP (op0
, 1)))
2679 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2680 mode size to (rotate A CX). */
2682 if (GET_CODE (op1
) == ASHIFT
2683 || GET_CODE (op1
) == SUBREG
)
2694 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2695 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2696 && CONST_INT_P (XEXP (opleft
, 1))
2697 && CONST_INT_P (XEXP (opright
, 1))
2698 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2699 == GET_MODE_PRECISION (mode
)))
2700 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2702 /* Same, but for ashift that has been "simplified" to a wider mode
2703 by simplify_shift_const. */
2705 if (GET_CODE (opleft
) == SUBREG
2706 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2707 && GET_CODE (opright
) == LSHIFTRT
2708 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2709 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2710 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2711 && (GET_MODE_SIZE (GET_MODE (opleft
))
2712 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2713 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2714 SUBREG_REG (XEXP (opright
, 0)))
2715 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2716 && CONST_INT_P (XEXP (opright
, 1))
2717 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2718 == GET_MODE_PRECISION (mode
)))
2719 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2720 XEXP (SUBREG_REG (opleft
), 1));
2722 /* If we have (ior (and (X C1) C2)), simplify this by making
2723 C1 as small as possible if C1 actually changes. */
2724 if (CONST_INT_P (op1
)
2725 && (HWI_COMPUTABLE_MODE_P (mode
)
2726 || INTVAL (op1
) > 0)
2727 && GET_CODE (op0
) == AND
2728 && CONST_INT_P (XEXP (op0
, 1))
2729 && CONST_INT_P (op1
)
2730 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2731 return simplify_gen_binary (IOR
, mode
,
2733 (AND
, mode
, XEXP (op0
, 0),
2734 GEN_INT (UINTVAL (XEXP (op0
, 1))
2738 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2739 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2740 the PLUS does not affect any of the bits in OP1: then we can do
2741 the IOR as a PLUS and we can associate. This is valid if OP1
2742 can be safely shifted left C bits. */
2743 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2744 && GET_CODE (XEXP (op0
, 0)) == PLUS
2745 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2746 && CONST_INT_P (XEXP (op0
, 1))
2747 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2749 int count
= INTVAL (XEXP (op0
, 1));
2750 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2752 if (mask
>> count
== INTVAL (trueop1
)
2753 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2754 return simplify_gen_binary (ASHIFTRT
, mode
,
2755 plus_constant (mode
, XEXP (op0
, 0),
2760 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2766 if (trueop1
== CONST0_RTX (mode
))
2768 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2769 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2770 if (rtx_equal_p (trueop0
, trueop1
)
2771 && ! side_effects_p (op0
)
2772 && GET_MODE_CLASS (mode
) != MODE_CC
)
2773 return CONST0_RTX (mode
);
2775 /* Canonicalize XOR of the most significant bit to PLUS. */
2776 if ((CONST_INT_P (op1
) || CONST_DOUBLE_AS_INT_P (op1
))
2777 && mode_signbit_p (mode
, op1
))
2778 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2779 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2780 if ((CONST_INT_P (op1
) || CONST_DOUBLE_AS_INT_P (op1
))
2781 && GET_CODE (op0
) == PLUS
2782 && (CONST_INT_P (XEXP (op0
, 1))
2783 || CONST_DOUBLE_AS_INT_P (XEXP (op0
, 1)))
2784 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2785 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2786 simplify_gen_binary (XOR
, mode
, op1
,
2789 /* If we are XORing two things that have no bits in common,
2790 convert them into an IOR. This helps to detect rotation encoded
2791 using those methods and possibly other simplifications. */
2793 if (HWI_COMPUTABLE_MODE_P (mode
)
2794 && (nonzero_bits (op0
, mode
)
2795 & nonzero_bits (op1
, mode
)) == 0)
2796 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2798 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2799 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2802 int num_negated
= 0;
2804 if (GET_CODE (op0
) == NOT
)
2805 num_negated
++, op0
= XEXP (op0
, 0);
2806 if (GET_CODE (op1
) == NOT
)
2807 num_negated
++, op1
= XEXP (op1
, 0);
2809 if (num_negated
== 2)
2810 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2811 else if (num_negated
== 1)
2812 return simplify_gen_unary (NOT
, mode
,
2813 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2817 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2818 correspond to a machine insn or result in further simplifications
2819 if B is a constant. */
2821 if (GET_CODE (op0
) == AND
2822 && rtx_equal_p (XEXP (op0
, 1), op1
)
2823 && ! side_effects_p (op1
))
2824 return simplify_gen_binary (AND
, mode
,
2825 simplify_gen_unary (NOT
, mode
,
2826 XEXP (op0
, 0), mode
),
2829 else if (GET_CODE (op0
) == AND
2830 && rtx_equal_p (XEXP (op0
, 0), op1
)
2831 && ! side_effects_p (op1
))
2832 return simplify_gen_binary (AND
, mode
,
2833 simplify_gen_unary (NOT
, mode
,
2834 XEXP (op0
, 1), mode
),
2837 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2838 we can transform like this:
2839 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2840 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2841 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2842 Attempt a few simplifications when B and C are both constants. */
2843 if (GET_CODE (op0
) == AND
2844 && CONST_INT_P (op1
)
2845 && CONST_INT_P (XEXP (op0
, 1)))
2847 rtx a
= XEXP (op0
, 0);
2848 rtx b
= XEXP (op0
, 1);
2850 HOST_WIDE_INT bval
= INTVAL (b
);
2851 HOST_WIDE_INT cval
= INTVAL (c
);
2854 = simplify_binary_operation (AND
, mode
,
2855 simplify_gen_unary (NOT
, mode
, a
, mode
),
2857 if ((~cval
& bval
) == 0)
2859 /* Try to simplify ~A&C | ~B&C. */
2860 if (na_c
!= NULL_RTX
)
2861 return simplify_gen_binary (IOR
, mode
, na_c
,
2862 GEN_INT (~bval
& cval
));
2866 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2867 if (na_c
== const0_rtx
)
2869 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2870 GEN_INT (~cval
& bval
));
2871 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2872 GEN_INT (~bval
& cval
));
2877 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2878 comparison if STORE_FLAG_VALUE is 1. */
2879 if (STORE_FLAG_VALUE
== 1
2880 && trueop1
== const1_rtx
2881 && COMPARISON_P (op0
)
2882 && (reversed
= reversed_comparison (op0
, mode
)))
2885 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2886 is (lt foo (const_int 0)), so we can perform the above
2887 simplification if STORE_FLAG_VALUE is 1. */
2889 if (STORE_FLAG_VALUE
== 1
2890 && trueop1
== const1_rtx
2891 && GET_CODE (op0
) == LSHIFTRT
2892 && CONST_INT_P (XEXP (op0
, 1))
2893 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2894 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2896 /* (xor (comparison foo bar) (const_int sign-bit))
2897 when STORE_FLAG_VALUE is the sign bit. */
2898 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2899 && trueop1
== const_true_rtx
2900 && COMPARISON_P (op0
)
2901 && (reversed
= reversed_comparison (op0
, mode
)))
2904 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2910 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2912 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2914 if (HWI_COMPUTABLE_MODE_P (mode
))
2916 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2917 HOST_WIDE_INT nzop1
;
2918 if (CONST_INT_P (trueop1
))
2920 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2921 /* If we are turning off bits already known off in OP0, we need
2923 if ((nzop0
& ~val1
) == 0)
2926 nzop1
= nonzero_bits (trueop1
, mode
);
2927 /* If we are clearing all the nonzero bits, the result is zero. */
2928 if ((nzop1
& nzop0
) == 0
2929 && !side_effects_p (op0
) && !side_effects_p (op1
))
2930 return CONST0_RTX (mode
);
2932 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2933 && GET_MODE_CLASS (mode
) != MODE_CC
)
2936 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2937 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2938 && ! side_effects_p (op0
)
2939 && GET_MODE_CLASS (mode
) != MODE_CC
)
2940 return CONST0_RTX (mode
);
2942 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2943 there are no nonzero bits of C outside of X's mode. */
2944 if ((GET_CODE (op0
) == SIGN_EXTEND
2945 || GET_CODE (op0
) == ZERO_EXTEND
)
2946 && CONST_INT_P (trueop1
)
2947 && HWI_COMPUTABLE_MODE_P (mode
)
2948 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2949 & UINTVAL (trueop1
)) == 0)
2951 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2952 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2953 gen_int_mode (INTVAL (trueop1
),
2955 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2958 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2959 we might be able to further simplify the AND with X and potentially
2960 remove the truncation altogether. */
2961 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2963 rtx x
= XEXP (op0
, 0);
2964 enum machine_mode xmode
= GET_MODE (x
);
2965 tem
= simplify_gen_binary (AND
, xmode
, x
,
2966 gen_int_mode (INTVAL (trueop1
), xmode
));
2967 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2970 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2971 if (GET_CODE (op0
) == IOR
2972 && CONST_INT_P (trueop1
)
2973 && CONST_INT_P (XEXP (op0
, 1)))
2975 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2976 return simplify_gen_binary (IOR
, mode
,
2977 simplify_gen_binary (AND
, mode
,
2978 XEXP (op0
, 0), op1
),
2979 gen_int_mode (tmp
, mode
));
2982 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2983 insn (and may simplify more). */
2984 if (GET_CODE (op0
) == XOR
2985 && rtx_equal_p (XEXP (op0
, 0), op1
)
2986 && ! side_effects_p (op1
))
2987 return simplify_gen_binary (AND
, mode
,
2988 simplify_gen_unary (NOT
, mode
,
2989 XEXP (op0
, 1), mode
),
2992 if (GET_CODE (op0
) == XOR
2993 && rtx_equal_p (XEXP (op0
, 1), op1
)
2994 && ! side_effects_p (op1
))
2995 return simplify_gen_binary (AND
, mode
,
2996 simplify_gen_unary (NOT
, mode
,
2997 XEXP (op0
, 0), mode
),
3000 /* Similarly for (~(A ^ B)) & A. */
3001 if (GET_CODE (op0
) == NOT
3002 && GET_CODE (XEXP (op0
, 0)) == XOR
3003 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3004 && ! side_effects_p (op1
))
3005 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3007 if (GET_CODE (op0
) == NOT
3008 && GET_CODE (XEXP (op0
, 0)) == XOR
3009 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3010 && ! side_effects_p (op1
))
3011 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3013 /* Convert (A | B) & A to A. */
3014 if (GET_CODE (op0
) == IOR
3015 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3016 || rtx_equal_p (XEXP (op0
, 1), op1
))
3017 && ! side_effects_p (XEXP (op0
, 0))
3018 && ! side_effects_p (XEXP (op0
, 1)))
3021 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3022 ((A & N) + B) & M -> (A + B) & M
3023 Similarly if (N & M) == 0,
3024 ((A | N) + B) & M -> (A + B) & M
3025 and for - instead of + and/or ^ instead of |.
3026 Also, if (N & M) == 0, then
3027 (A +- N) & M -> A & M. */
3028 if (CONST_INT_P (trueop1
)
3029 && HWI_COMPUTABLE_MODE_P (mode
)
3030 && ~UINTVAL (trueop1
)
3031 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3032 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3037 pmop
[0] = XEXP (op0
, 0);
3038 pmop
[1] = XEXP (op0
, 1);
3040 if (CONST_INT_P (pmop
[1])
3041 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3042 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3044 for (which
= 0; which
< 2; which
++)
3047 switch (GET_CODE (tem
))
3050 if (CONST_INT_P (XEXP (tem
, 1))
3051 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3052 == UINTVAL (trueop1
))
3053 pmop
[which
] = XEXP (tem
, 0);
3057 if (CONST_INT_P (XEXP (tem
, 1))
3058 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3059 pmop
[which
] = XEXP (tem
, 0);
3066 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3068 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3070 return simplify_gen_binary (code
, mode
, tem
, op1
);
3074 /* (and X (ior (not X) Y) -> (and X Y) */
3075 if (GET_CODE (op1
) == IOR
3076 && GET_CODE (XEXP (op1
, 0)) == NOT
3077 && op0
== XEXP (XEXP (op1
, 0), 0))
3078 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3080 /* (and (ior (not X) Y) X) -> (and X Y) */
3081 if (GET_CODE (op0
) == IOR
3082 && GET_CODE (XEXP (op0
, 0)) == NOT
3083 && op1
== XEXP (XEXP (op0
, 0), 0))
3084 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3086 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3092 /* 0/x is 0 (or x&0 if x has side-effects). */
3093 if (trueop0
== CONST0_RTX (mode
))
3095 if (side_effects_p (op1
))
3096 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3100 if (trueop1
== CONST1_RTX (mode
))
3101 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3102 /* Convert divide by power of two into shift. */
3103 if (CONST_INT_P (trueop1
)
3104 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3105 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3109 /* Handle floating point and integers separately. */
3110 if (SCALAR_FLOAT_MODE_P (mode
))
3112 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3113 safe for modes with NaNs, since 0.0 / 0.0 will then be
3114 NaN rather than 0.0. Nor is it safe for modes with signed
3115 zeros, since dividing 0 by a negative number gives -0.0 */
3116 if (trueop0
== CONST0_RTX (mode
)
3117 && !HONOR_NANS (mode
)
3118 && !HONOR_SIGNED_ZEROS (mode
)
3119 && ! side_effects_p (op1
))
3122 if (trueop1
== CONST1_RTX (mode
)
3123 && !HONOR_SNANS (mode
))
3126 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3127 && trueop1
!= CONST0_RTX (mode
))
3130 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3133 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3134 && !HONOR_SNANS (mode
))
3135 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3137 /* Change FP division by a constant into multiplication.
3138 Only do this with -freciprocal-math. */
3139 if (flag_reciprocal_math
3140 && !REAL_VALUES_EQUAL (d
, dconst0
))
3142 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3143 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3144 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3148 else if (SCALAR_INT_MODE_P (mode
))
3150 /* 0/x is 0 (or x&0 if x has side-effects). */
3151 if (trueop0
== CONST0_RTX (mode
)
3152 && !cfun
->can_throw_non_call_exceptions
)
3154 if (side_effects_p (op1
))
3155 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3159 if (trueop1
== CONST1_RTX (mode
))
3160 return rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3162 if (trueop1
== constm1_rtx
)
3164 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3165 return simplify_gen_unary (NEG
, mode
, x
, mode
);
3171 /* 0%x is 0 (or x&0 if x has side-effects). */
3172 if (trueop0
== CONST0_RTX (mode
))
3174 if (side_effects_p (op1
))
3175 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3178 /* x%1 is 0 (of x&0 if x has side-effects). */
3179 if (trueop1
== CONST1_RTX (mode
))
3181 if (side_effects_p (op0
))
3182 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3183 return CONST0_RTX (mode
);
3185 /* Implement modulus by power of two as AND. */
3186 if (CONST_INT_P (trueop1
)
3187 && exact_log2 (UINTVAL (trueop1
)) > 0)
3188 return simplify_gen_binary (AND
, mode
, op0
,
3189 GEN_INT (INTVAL (op1
) - 1));
3193 /* 0%x is 0 (or x&0 if x has side-effects). */
3194 if (trueop0
== CONST0_RTX (mode
))
3196 if (side_effects_p (op1
))
3197 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3200 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3201 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3203 if (side_effects_p (op0
))
3204 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3205 return CONST0_RTX (mode
);
3212 if (trueop1
== CONST0_RTX (mode
))
3214 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3216 /* Rotating ~0 always results in ~0. */
3217 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3218 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3219 && ! side_effects_p (op1
))
3222 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3224 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
3225 if (val
!= INTVAL (op1
))
3226 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3233 if (trueop1
== CONST0_RTX (mode
))
3235 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3237 goto canonicalize_shift
;
3240 if (trueop1
== CONST0_RTX (mode
))
3242 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3244 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3245 if (GET_CODE (op0
) == CLZ
3246 && CONST_INT_P (trueop1
)
3247 && STORE_FLAG_VALUE
== 1
3248 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3250 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3251 unsigned HOST_WIDE_INT zero_val
= 0;
3253 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3254 && zero_val
== GET_MODE_PRECISION (imode
)
3255 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3256 return simplify_gen_relational (EQ
, mode
, imode
,
3257 XEXP (op0
, 0), const0_rtx
);
3259 goto canonicalize_shift
;
3262 if (width
<= HOST_BITS_PER_WIDE_INT
3263 && mode_signbit_p (mode
, trueop1
)
3264 && ! side_effects_p (op0
))
3266 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3268 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3274 if (width
<= HOST_BITS_PER_WIDE_INT
3275 && CONST_INT_P (trueop1
)
3276 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3277 && ! side_effects_p (op0
))
3279 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3281 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3287 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3289 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3291 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3297 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3299 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3301 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3314 /* ??? There are simplifications that can be done. */
3318 if (!VECTOR_MODE_P (mode
))
3320 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3321 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3322 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3323 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3324 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3326 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3327 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3330 /* Extract a scalar element from a nested VEC_SELECT expression
3331 (with optional nested VEC_CONCAT expression). Some targets
3332 (i386) extract scalar element from a vector using chain of
3333 nested VEC_SELECT expressions. When input operand is a memory
3334 operand, this operation can be simplified to a simple scalar
3335 load from an offseted memory address. */
3336 if (GET_CODE (trueop0
) == VEC_SELECT
)
3338 rtx op0
= XEXP (trueop0
, 0);
3339 rtx op1
= XEXP (trueop0
, 1);
3341 enum machine_mode opmode
= GET_MODE (op0
);
3342 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3343 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3345 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3351 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3352 gcc_assert (i
< n_elts
);
3354 /* Select element, pointed by nested selector. */
3355 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3357 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3358 if (GET_CODE (op0
) == VEC_CONCAT
)
3360 rtx op00
= XEXP (op0
, 0);
3361 rtx op01
= XEXP (op0
, 1);
3363 enum machine_mode mode00
, mode01
;
3364 int n_elts00
, n_elts01
;
3366 mode00
= GET_MODE (op00
);
3367 mode01
= GET_MODE (op01
);
3369 /* Find out number of elements of each operand. */
3370 if (VECTOR_MODE_P (mode00
))
3372 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3373 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3378 if (VECTOR_MODE_P (mode01
))
3380 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3381 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3386 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3388 /* Select correct operand of VEC_CONCAT
3389 and adjust selector. */
3390 if (elem
< n_elts01
)
3401 vec
= rtvec_alloc (1);
3402 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3404 tmp
= gen_rtx_fmt_ee (code
, mode
,
3405 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3408 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3409 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3410 return XEXP (trueop0
, 0);
3414 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3415 gcc_assert (GET_MODE_INNER (mode
)
3416 == GET_MODE_INNER (GET_MODE (trueop0
)));
3417 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3419 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3421 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3422 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3423 rtvec v
= rtvec_alloc (n_elts
);
3426 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3427 for (i
= 0; i
< n_elts
; i
++)
3429 rtx x
= XVECEXP (trueop1
, 0, i
);
3431 gcc_assert (CONST_INT_P (x
));
3432 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3436 return gen_rtx_CONST_VECTOR (mode
, v
);
3439 /* Recognize the identity. */
3440 if (GET_MODE (trueop0
) == mode
)
3442 bool maybe_ident
= true;
3443 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3445 rtx j
= XVECEXP (trueop1
, 0, i
);
3446 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3448 maybe_ident
= false;
3456 /* If we build {a,b} then permute it, build the result directly. */
3457 if (XVECLEN (trueop1
, 0) == 2
3458 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3459 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3460 && GET_CODE (trueop0
) == VEC_CONCAT
3461 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3462 && GET_MODE (XEXP (trueop0
, 0)) == mode
3463 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3464 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3466 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3467 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3470 gcc_assert (i0
< 4 && i1
< 4);
3471 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3472 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3474 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3477 if (XVECLEN (trueop1
, 0) == 2
3478 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3479 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3480 && GET_CODE (trueop0
) == VEC_CONCAT
3481 && GET_MODE (trueop0
) == mode
)
3483 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3484 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3487 gcc_assert (i0
< 2 && i1
< 2);
3488 subop0
= XEXP (trueop0
, i0
);
3489 subop1
= XEXP (trueop0
, i1
);
3491 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3495 if (XVECLEN (trueop1
, 0) == 1
3496 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3497 && GET_CODE (trueop0
) == VEC_CONCAT
)
3500 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3502 /* Try to find the element in the VEC_CONCAT. */
3503 while (GET_MODE (vec
) != mode
3504 && GET_CODE (vec
) == VEC_CONCAT
)
3506 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3507 if (offset
< vec_size
)
3508 vec
= XEXP (vec
, 0);
3512 vec
= XEXP (vec
, 1);
3514 vec
= avoid_constant_pool_reference (vec
);
3517 if (GET_MODE (vec
) == mode
)
3524 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3525 ? GET_MODE (trueop0
)
3526 : GET_MODE_INNER (mode
));
3527 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3528 ? GET_MODE (trueop1
)
3529 : GET_MODE_INNER (mode
));
3531 gcc_assert (VECTOR_MODE_P (mode
));
3532 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3533 == GET_MODE_SIZE (mode
));
3535 if (VECTOR_MODE_P (op0_mode
))
3536 gcc_assert (GET_MODE_INNER (mode
)
3537 == GET_MODE_INNER (op0_mode
));
3539 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3541 if (VECTOR_MODE_P (op1_mode
))
3542 gcc_assert (GET_MODE_INNER (mode
)
3543 == GET_MODE_INNER (op1_mode
));
3545 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3547 if ((GET_CODE (trueop0
) == CONST_VECTOR
3548 || CONST_INT_P (trueop0
) || CONST_DOUBLE_P (trueop0
))
3549 && (GET_CODE (trueop1
) == CONST_VECTOR
3550 || CONST_INT_P (trueop1
) || CONST_DOUBLE_P (trueop1
)))
3552 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3553 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3554 rtvec v
= rtvec_alloc (n_elts
);
3556 unsigned in_n_elts
= 1;
3558 if (VECTOR_MODE_P (op0_mode
))
3559 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3560 for (i
= 0; i
< n_elts
; i
++)
3564 if (!VECTOR_MODE_P (op0_mode
))
3565 RTVEC_ELT (v
, i
) = trueop0
;
3567 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3571 if (!VECTOR_MODE_P (op1_mode
))
3572 RTVEC_ELT (v
, i
) = trueop1
;
3574 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3579 return gen_rtx_CONST_VECTOR (mode
, v
);
3582 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3583 if (GET_CODE (trueop0
) == VEC_SELECT
3584 && GET_CODE (trueop1
) == VEC_SELECT
3585 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0)))
3587 rtx par0
= XEXP (trueop0
, 1);
3588 rtx par1
= XEXP (trueop1
, 1);
3589 int len0
= XVECLEN (par0
, 0);
3590 int len1
= XVECLEN (par1
, 0);
3591 rtvec vec
= rtvec_alloc (len0
+ len1
);
3592 for (int i
= 0; i
< len0
; i
++)
3593 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3594 for (int i
= 0; i
< len1
; i
++)
3595 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3596 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3597 gen_rtx_PARALLEL (VOIDmode
, vec
));
3610 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3613 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3615 unsigned int width
= GET_MODE_PRECISION (mode
);
3617 if (VECTOR_MODE_P (mode
)
3618 && code
!= VEC_CONCAT
3619 && GET_CODE (op0
) == CONST_VECTOR
3620 && GET_CODE (op1
) == CONST_VECTOR
)
3622 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3623 enum machine_mode op0mode
= GET_MODE (op0
);
3624 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3625 enum machine_mode op1mode
= GET_MODE (op1
);
3626 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3627 rtvec v
= rtvec_alloc (n_elts
);
3630 gcc_assert (op0_n_elts
== n_elts
);
3631 gcc_assert (op1_n_elts
== n_elts
);
3632 for (i
= 0; i
< n_elts
; i
++)
3634 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3635 CONST_VECTOR_ELT (op0
, i
),
3636 CONST_VECTOR_ELT (op1
, i
));
3639 RTVEC_ELT (v
, i
) = x
;
3642 return gen_rtx_CONST_VECTOR (mode
, v
);
3645 if (VECTOR_MODE_P (mode
)
3646 && code
== VEC_CONCAT
3647 && (CONST_INT_P (op0
)
3648 || GET_CODE (op0
) == CONST_FIXED
3649 || CONST_DOUBLE_P (op0
))
3650 && (CONST_INT_P (op1
)
3651 || CONST_DOUBLE_P (op1
)
3652 || GET_CODE (op1
) == CONST_FIXED
))
3654 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3655 rtvec v
= rtvec_alloc (n_elts
);
3657 gcc_assert (n_elts
>= 2);
3660 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3661 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3663 RTVEC_ELT (v
, 0) = op0
;
3664 RTVEC_ELT (v
, 1) = op1
;
3668 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3669 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3672 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3673 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3674 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3676 for (i
= 0; i
< op0_n_elts
; ++i
)
3677 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3678 for (i
= 0; i
< op1_n_elts
; ++i
)
3679 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3682 return gen_rtx_CONST_VECTOR (mode
, v
);
3685 if (SCALAR_FLOAT_MODE_P (mode
)
3686 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3687 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3688 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3699 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3701 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3703 for (i
= 0; i
< 4; i
++)
3720 real_from_target (&r
, tmp0
, mode
);
3721 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3725 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3728 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3729 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3730 real_convert (&f0
, mode
, &f0
);
3731 real_convert (&f1
, mode
, &f1
);
3733 if (HONOR_SNANS (mode
)
3734 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3738 && REAL_VALUES_EQUAL (f1
, dconst0
)
3739 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3742 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3743 && flag_trapping_math
3744 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3746 int s0
= REAL_VALUE_NEGATIVE (f0
);
3747 int s1
= REAL_VALUE_NEGATIVE (f1
);
3752 /* Inf + -Inf = NaN plus exception. */
3757 /* Inf - Inf = NaN plus exception. */
3762 /* Inf / Inf = NaN plus exception. */
3769 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3770 && flag_trapping_math
3771 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3772 || (REAL_VALUE_ISINF (f1
)
3773 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3774 /* Inf * 0 = NaN plus exception. */
3777 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3779 real_convert (&result
, mode
, &value
);
3781 /* Don't constant fold this floating point operation if
3782 the result has overflowed and flag_trapping_math. */
3784 if (flag_trapping_math
3785 && MODE_HAS_INFINITIES (mode
)
3786 && REAL_VALUE_ISINF (result
)
3787 && !REAL_VALUE_ISINF (f0
)
3788 && !REAL_VALUE_ISINF (f1
))
3789 /* Overflow plus exception. */
3792 /* Don't constant fold this floating point operation if the
3793 result may dependent upon the run-time rounding mode and
3794 flag_rounding_math is set, or if GCC's software emulation
3795 is unable to accurately represent the result. */
3797 if ((flag_rounding_math
3798 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3799 && (inexact
|| !real_identical (&result
, &value
)))
3802 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3806 /* We can fold some multi-word operations. */
3807 if (GET_MODE_CLASS (mode
) == MODE_INT
3808 && width
== HOST_BITS_PER_DOUBLE_INT
3809 && (CONST_DOUBLE_AS_INT_P (op0
) || CONST_INT_P (op0
))
3810 && (CONST_DOUBLE_AS_INT_P (op1
) || CONST_INT_P (op1
)))
3812 double_int o0
, o1
, res
, tmp
;
3815 o0
= rtx_to_double_int (op0
);
3816 o1
= rtx_to_double_int (op1
);
3821 /* A - B == A + (-B). */
3824 /* Fall through.... */
3835 res
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
3842 tmp
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
3849 res
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
3856 tmp
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
3890 case LSHIFTRT
: case ASHIFTRT
:
3892 case ROTATE
: case ROTATERT
:
3894 unsigned HOST_WIDE_INT cnt
;
3896 if (SHIFT_COUNT_TRUNCATED
)
3899 o1
.low
&= GET_MODE_PRECISION (mode
) - 1;
3902 if (!o1
.fits_uhwi ()
3903 || o1
.to_uhwi () >= GET_MODE_PRECISION (mode
))
3906 cnt
= o1
.to_uhwi ();
3907 unsigned short prec
= GET_MODE_PRECISION (mode
);
3909 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3910 res
= o0
.rshift (cnt
, prec
, code
== ASHIFTRT
);
3911 else if (code
== ASHIFT
)
3912 res
= o0
.alshift (cnt
, prec
);
3913 else if (code
== ROTATE
)
3914 res
= o0
.lrotate (cnt
, prec
);
3915 else /* code == ROTATERT */
3916 res
= o0
.rrotate (cnt
, prec
);
3924 return immed_double_int_const (res
, mode
);
3927 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
3928 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3930 /* Get the integer argument values in two forms:
3931 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3933 arg0
= INTVAL (op0
);
3934 arg1
= INTVAL (op1
);
3936 if (width
< HOST_BITS_PER_WIDE_INT
)
3938 arg0
&= GET_MODE_MASK (mode
);
3939 arg1
&= GET_MODE_MASK (mode
);
3942 if (val_signbit_known_set_p (mode
, arg0s
))
3943 arg0s
|= ~GET_MODE_MASK (mode
);
3946 if (val_signbit_known_set_p (mode
, arg1s
))
3947 arg1s
|= ~GET_MODE_MASK (mode
);
3955 /* Compute the value of the arithmetic. */
3960 val
= arg0s
+ arg1s
;
3964 val
= arg0s
- arg1s
;
3968 val
= arg0s
* arg1s
;
3973 || ((unsigned HOST_WIDE_INT
) arg0s
3974 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3977 val
= arg0s
/ arg1s
;
3982 || ((unsigned HOST_WIDE_INT
) arg0s
3983 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3986 val
= arg0s
% arg1s
;
3991 || ((unsigned HOST_WIDE_INT
) arg0s
3992 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
3995 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
4000 || ((unsigned HOST_WIDE_INT
) arg0s
4001 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4004 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
4022 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4023 the value is in range. We can't return any old value for
4024 out-of-range arguments because either the middle-end (via
4025 shift_truncation_mask) or the back-end might be relying on
4026 target-specific knowledge. Nor can we rely on
4027 shift_truncation_mask, since the shift might not be part of an
4028 ashlM3, lshrM3 or ashrM3 instruction. */
4029 if (SHIFT_COUNT_TRUNCATED
)
4030 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
4031 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
4034 val
= (code
== ASHIFT
4035 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
4036 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
4038 /* Sign-extend the result for arithmetic right shifts. */
4039 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
4040 val
|= ((unsigned HOST_WIDE_INT
) (-1)) << (width
- arg1
);
4048 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
4049 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
4057 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
4058 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
4062 /* Do nothing here. */
4066 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
4070 val
= ((unsigned HOST_WIDE_INT
) arg0
4071 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
4075 val
= arg0s
> arg1s
? arg0s
: arg1s
;
4079 val
= ((unsigned HOST_WIDE_INT
) arg0
4080 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
4093 /* ??? There are simplifications that can be done. */
4100 return gen_int_mode (val
, mode
);
4108 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
4111 Rather than test for specific case, we do this by a brute-force method
4112 and do all possible simplifications until no more changes occur. Then
4113 we rebuild the operation. */
4115 struct simplify_plus_minus_op_data
4122 simplify_plus_minus_op_data_cmp (rtx x
, rtx y
)
4126 result
= (commutative_operand_precedence (y
)
4127 - commutative_operand_precedence (x
));
4131 /* Group together equal REGs to do more simplification. */
4132 if (REG_P (x
) && REG_P (y
))
4133 return REGNO (x
) > REGNO (y
);
4139 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
4142 struct simplify_plus_minus_op_data ops
[8];
4144 int n_ops
= 2, input_ops
= 2;
4145 int changed
, n_constants
= 0, canonicalized
= 0;
4148 memset (ops
, 0, sizeof ops
);
4150 /* Set up the two operands and then expand them until nothing has been
4151 changed. If we run out of room in our array, give up; this should
4152 almost never happen. */
4157 ops
[1].neg
= (code
== MINUS
);
4163 for (i
= 0; i
< n_ops
; i
++)
4165 rtx this_op
= ops
[i
].op
;
4166 int this_neg
= ops
[i
].neg
;
4167 enum rtx_code this_code
= GET_CODE (this_op
);
4176 ops
[n_ops
].op
= XEXP (this_op
, 1);
4177 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
4180 ops
[i
].op
= XEXP (this_op
, 0);
4183 canonicalized
|= this_neg
;
4187 ops
[i
].op
= XEXP (this_op
, 0);
4188 ops
[i
].neg
= ! this_neg
;
4195 && GET_CODE (XEXP (this_op
, 0)) == PLUS
4196 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
4197 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
4199 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
4200 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
4201 ops
[n_ops
].neg
= this_neg
;
4209 /* ~a -> (-a - 1) */
4212 ops
[n_ops
].op
= CONSTM1_RTX (mode
);
4213 ops
[n_ops
++].neg
= this_neg
;
4214 ops
[i
].op
= XEXP (this_op
, 0);
4215 ops
[i
].neg
= !this_neg
;
4225 ops
[i
].op
= neg_const_int (mode
, this_op
);
4239 if (n_constants
> 1)
4242 gcc_assert (n_ops
>= 2);
4244 /* If we only have two operands, we can avoid the loops. */
4247 enum rtx_code code
= ops
[0].neg
|| ops
[1].neg
? MINUS
: PLUS
;
4250 /* Get the two operands. Be careful with the order, especially for
4251 the cases where code == MINUS. */
4252 if (ops
[0].neg
&& ops
[1].neg
)
4254 lhs
= gen_rtx_NEG (mode
, ops
[0].op
);
4257 else if (ops
[0].neg
)
4268 return simplify_const_binary_operation (code
, mode
, lhs
, rhs
);
4271 /* Now simplify each pair of operands until nothing changes. */
4274 /* Insertion sort is good enough for an eight-element array. */
4275 for (i
= 1; i
< n_ops
; i
++)
4277 struct simplify_plus_minus_op_data save
;
4279 if (!simplify_plus_minus_op_data_cmp (ops
[j
].op
, ops
[i
].op
))
4285 ops
[j
+ 1] = ops
[j
];
4286 while (j
-- && simplify_plus_minus_op_data_cmp (ops
[j
].op
, save
.op
));
4291 for (i
= n_ops
- 1; i
> 0; i
--)
4292 for (j
= i
- 1; j
>= 0; j
--)
4294 rtx lhs
= ops
[j
].op
, rhs
= ops
[i
].op
;
4295 int lneg
= ops
[j
].neg
, rneg
= ops
[i
].neg
;
4297 if (lhs
!= 0 && rhs
!= 0)
4299 enum rtx_code ncode
= PLUS
;
4305 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4307 else if (swap_commutative_operands_p (lhs
, rhs
))
4308 tem
= lhs
, lhs
= rhs
, rhs
= tem
;
4310 if ((GET_CODE (lhs
) == CONST
|| CONST_INT_P (lhs
))
4311 && (GET_CODE (rhs
) == CONST
|| CONST_INT_P (rhs
)))
4313 rtx tem_lhs
, tem_rhs
;
4315 tem_lhs
= GET_CODE (lhs
) == CONST
? XEXP (lhs
, 0) : lhs
;
4316 tem_rhs
= GET_CODE (rhs
) == CONST
? XEXP (rhs
, 0) : rhs
;
4317 tem
= simplify_binary_operation (ncode
, mode
, tem_lhs
, tem_rhs
);
4319 if (tem
&& !CONSTANT_P (tem
))
4320 tem
= gen_rtx_CONST (GET_MODE (tem
), tem
);
4323 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
4325 /* Reject "simplifications" that just wrap the two
4326 arguments in a CONST. Failure to do so can result
4327 in infinite recursion with simplify_binary_operation
4328 when it calls us to simplify CONST operations. */
4330 && ! (GET_CODE (tem
) == CONST
4331 && GET_CODE (XEXP (tem
, 0)) == ncode
4332 && XEXP (XEXP (tem
, 0), 0) == lhs
4333 && XEXP (XEXP (tem
, 0), 1) == rhs
))
4336 if (GET_CODE (tem
) == NEG
)
4337 tem
= XEXP (tem
, 0), lneg
= !lneg
;
4338 if (CONST_INT_P (tem
) && lneg
)
4339 tem
= neg_const_int (mode
, tem
), lneg
= 0;
4343 ops
[j
].op
= NULL_RTX
;
4350 /* If nothing changed, fail. */
4354 /* Pack all the operands to the lower-numbered entries. */
4355 for (i
= 0, j
= 0; j
< n_ops
; j
++)
4365 /* Create (minus -C X) instead of (neg (const (plus X C))). */
4367 && CONST_INT_P (ops
[1].op
)
4368 && CONSTANT_P (ops
[0].op
)
4370 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
4372 /* We suppressed creation of trivial CONST expressions in the
4373 combination loop to avoid recursion. Create one manually now.
4374 The combination loop should have ensured that there is exactly
4375 one CONST_INT, and the sort will have ensured that it is last
4376 in the array and that any other constant will be next-to-last. */
4379 && CONST_INT_P (ops
[n_ops
- 1].op
)
4380 && CONSTANT_P (ops
[n_ops
- 2].op
))
4382 rtx value
= ops
[n_ops
- 1].op
;
4383 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
4384 value
= neg_const_int (mode
, value
);
4385 ops
[n_ops
- 2].op
= plus_constant (mode
, ops
[n_ops
- 2].op
,
4390 /* Put a non-negated operand first, if possible. */
4392 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
4395 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
4404 /* Now make the result by performing the requested operations. */
4406 for (i
= 1; i
< n_ops
; i
++)
4407 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
4408 mode
, result
, ops
[i
].op
);
4413 /* Check whether an operand is suitable for calling simplify_plus_minus. */
4415 plus_minus_operand_p (const_rtx x
)
4417 return GET_CODE (x
) == PLUS
4418 || GET_CODE (x
) == MINUS
4419 || (GET_CODE (x
) == CONST
4420 && GET_CODE (XEXP (x
, 0)) == PLUS
4421 && CONSTANT_P (XEXP (XEXP (x
, 0), 0))
4422 && CONSTANT_P (XEXP (XEXP (x
, 0), 1)));
4425 /* Like simplify_binary_operation except used for relational operators.
4426 MODE is the mode of the result. If MODE is VOIDmode, both operands must
4427 not also be VOIDmode.
4429 CMP_MODE specifies in which mode the comparison is done in, so it is
4430 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
4431 the operands or, if both are VOIDmode, the operands are compared in
4432 "infinite precision". */
4434 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
4435 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4437 rtx tem
, trueop0
, trueop1
;
4439 if (cmp_mode
== VOIDmode
)
4440 cmp_mode
= GET_MODE (op0
);
4441 if (cmp_mode
== VOIDmode
)
4442 cmp_mode
= GET_MODE (op1
);
4444 tem
= simplify_const_relational_operation (code
, cmp_mode
, op0
, op1
);
4447 if (SCALAR_FLOAT_MODE_P (mode
))
4449 if (tem
== const0_rtx
)
4450 return CONST0_RTX (mode
);
4451 #ifdef FLOAT_STORE_FLAG_VALUE
4453 REAL_VALUE_TYPE val
;
4454 val
= FLOAT_STORE_FLAG_VALUE (mode
);
4455 return CONST_DOUBLE_FROM_REAL_VALUE (val
, mode
);
4461 if (VECTOR_MODE_P (mode
))
4463 if (tem
== const0_rtx
)
4464 return CONST0_RTX (mode
);
4465 #ifdef VECTOR_STORE_FLAG_VALUE
4470 rtx val
= VECTOR_STORE_FLAG_VALUE (mode
);
4471 if (val
== NULL_RTX
)
4473 if (val
== const1_rtx
)
4474 return CONST1_RTX (mode
);
4476 units
= GET_MODE_NUNITS (mode
);
4477 v
= rtvec_alloc (units
);
4478 for (i
= 0; i
< units
; i
++)
4479 RTVEC_ELT (v
, i
) = val
;
4480 return gen_rtx_raw_CONST_VECTOR (mode
, v
);
4490 /* For the following tests, ensure const0_rtx is op1. */
4491 if (swap_commutative_operands_p (op0
, op1
)
4492 || (op0
== const0_rtx
&& op1
!= const0_rtx
))
4493 tem
= op0
, op0
= op1
, op1
= tem
, code
= swap_condition (code
);
4495 /* If op0 is a compare, extract the comparison arguments from it. */
4496 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4497 return simplify_gen_relational (code
, mode
, VOIDmode
,
4498 XEXP (op0
, 0), XEXP (op0
, 1));
4500 if (GET_MODE_CLASS (cmp_mode
) == MODE_CC
4504 trueop0
= avoid_constant_pool_reference (op0
);
4505 trueop1
= avoid_constant_pool_reference (op1
);
4506 return simplify_relational_operation_1 (code
, mode
, cmp_mode
,
4510 /* This part of simplify_relational_operation is only used when CMP_MODE
4511 is not in class MODE_CC (i.e. it is a real comparison).
4513 MODE is the mode of the result, while CMP_MODE specifies in which
4514 mode the comparison is done in, so it is the mode of the operands. */
4517 simplify_relational_operation_1 (enum rtx_code code
, enum machine_mode mode
,
4518 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
4520 enum rtx_code op0code
= GET_CODE (op0
);
4522 if (op1
== const0_rtx
&& COMPARISON_P (op0
))
4524 /* If op0 is a comparison, extract the comparison arguments
4528 if (GET_MODE (op0
) == mode
)
4529 return simplify_rtx (op0
);
4531 return simplify_gen_relational (GET_CODE (op0
), mode
, VOIDmode
,
4532 XEXP (op0
, 0), XEXP (op0
, 1));
4534 else if (code
== EQ
)
4536 enum rtx_code new_code
= reversed_comparison_code (op0
, NULL_RTX
);
4537 if (new_code
!= UNKNOWN
)
4538 return simplify_gen_relational (new_code
, mode
, VOIDmode
,
4539 XEXP (op0
, 0), XEXP (op0
, 1));
4543 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4544 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4545 if ((code
== LTU
|| code
== GEU
)
4546 && GET_CODE (op0
) == PLUS
4547 && CONST_INT_P (XEXP (op0
, 1))
4548 && (rtx_equal_p (op1
, XEXP (op0
, 0))
4549 || rtx_equal_p (op1
, XEXP (op0
, 1))))
4552 = simplify_gen_unary (NEG
, cmp_mode
, XEXP (op0
, 1), cmp_mode
);
4553 return simplify_gen_relational ((code
== LTU
? GEU
: LTU
), mode
,
4554 cmp_mode
, XEXP (op0
, 0), new_cmp
);
4557 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4558 if ((code
== LTU
|| code
== GEU
)
4559 && GET_CODE (op0
) == PLUS
4560 && rtx_equal_p (op1
, XEXP (op0
, 1))
4561 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4562 && !rtx_equal_p (op1
, XEXP (op0
, 0)))
4563 return simplify_gen_relational (code
, mode
, cmp_mode
, op0
,
4564 copy_rtx (XEXP (op0
, 0)));
4566 if (op1
== const0_rtx
)
4568 /* Canonicalize (GTU x 0) as (NE x 0). */
4570 return simplify_gen_relational (NE
, mode
, cmp_mode
, op0
, op1
);
4571 /* Canonicalize (LEU x 0) as (EQ x 0). */
4573 return simplify_gen_relational (EQ
, mode
, cmp_mode
, op0
, op1
);
4575 else if (op1
== const1_rtx
)
4580 /* Canonicalize (GE x 1) as (GT x 0). */
4581 return simplify_gen_relational (GT
, mode
, cmp_mode
,
4584 /* Canonicalize (GEU x 1) as (NE x 0). */
4585 return simplify_gen_relational (NE
, mode
, cmp_mode
,
4588 /* Canonicalize (LT x 1) as (LE x 0). */
4589 return simplify_gen_relational (LE
, mode
, cmp_mode
,
4592 /* Canonicalize (LTU x 1) as (EQ x 0). */
4593 return simplify_gen_relational (EQ
, mode
, cmp_mode
,
4599 else if (op1
== constm1_rtx
)
4601 /* Canonicalize (LE x -1) as (LT x 0). */
4603 return simplify_gen_relational (LT
, mode
, cmp_mode
, op0
, const0_rtx
);
4604 /* Canonicalize (GT x -1) as (GE x 0). */
4606 return simplify_gen_relational (GE
, mode
, cmp_mode
, op0
, const0_rtx
);
4609 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4610 if ((code
== EQ
|| code
== NE
)
4611 && (op0code
== PLUS
|| op0code
== MINUS
)
4613 && CONSTANT_P (XEXP (op0
, 1))
4614 && (INTEGRAL_MODE_P (cmp_mode
) || flag_unsafe_math_optimizations
))
4616 rtx x
= XEXP (op0
, 0);
4617 rtx c
= XEXP (op0
, 1);
4618 enum rtx_code invcode
= op0code
== PLUS
? MINUS
: PLUS
;
4619 rtx tem
= simplify_gen_binary (invcode
, cmp_mode
, op1
, c
);
4621 /* Detect an infinite recursive condition, where we oscillate at this
4622 simplification case between:
4623 A + B == C <---> C - B == A,
4624 where A, B, and C are all constants with non-simplifiable expressions,
4625 usually SYMBOL_REFs. */
4626 if (GET_CODE (tem
) == invcode
4628 && rtx_equal_p (c
, XEXP (tem
, 1)))
4631 return simplify_gen_relational (code
, mode
, cmp_mode
, x
, tem
);
4634 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4635 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4637 && op1
== const0_rtx
4638 && GET_MODE_CLASS (mode
) == MODE_INT
4639 && cmp_mode
!= VOIDmode
4640 /* ??? Work-around BImode bugs in the ia64 backend. */
4642 && cmp_mode
!= BImode
4643 && nonzero_bits (op0
, cmp_mode
) == 1
4644 && STORE_FLAG_VALUE
== 1)
4645 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4646 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4647 : lowpart_subreg (mode
, op0
, cmp_mode
);
4649 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4650 if ((code
== EQ
|| code
== NE
)
4651 && op1
== const0_rtx
4653 return simplify_gen_relational (code
, mode
, cmp_mode
,
4654 XEXP (op0
, 0), XEXP (op0
, 1));
4656 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4657 if ((code
== EQ
|| code
== NE
)
4659 && rtx_equal_p (XEXP (op0
, 0), op1
)
4660 && !side_effects_p (XEXP (op0
, 0)))
4661 return simplify_gen_relational (code
, mode
, cmp_mode
,
4662 XEXP (op0
, 1), const0_rtx
);
4664 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4665 if ((code
== EQ
|| code
== NE
)
4667 && rtx_equal_p (XEXP (op0
, 1), op1
)
4668 && !side_effects_p (XEXP (op0
, 1)))
4669 return simplify_gen_relational (code
, mode
, cmp_mode
,
4670 XEXP (op0
, 0), const0_rtx
);
4672 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4673 if ((code
== EQ
|| code
== NE
)
4675 && (CONST_INT_P (op1
) || CONST_DOUBLE_AS_INT_P (op1
))
4676 && (CONST_INT_P (XEXP (op0
, 1))
4677 || CONST_DOUBLE_AS_INT_P (XEXP (op0
, 1))))
4678 return simplify_gen_relational (code
, mode
, cmp_mode
, XEXP (op0
, 0),
4679 simplify_gen_binary (XOR
, cmp_mode
,
4680 XEXP (op0
, 1), op1
));
4682 if (op0code
== POPCOUNT
&& op1
== const0_rtx
)
4688 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4689 return simplify_gen_relational (EQ
, mode
, GET_MODE (XEXP (op0
, 0)),
4690 XEXP (op0
, 0), const0_rtx
);
4695 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4696 return simplify_gen_relational (NE
, mode
, GET_MODE (XEXP (op0
, 0)),
4697 XEXP (op0
, 0), const0_rtx
);
4716 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4717 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
4718 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4719 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4720 For floating-point comparisons, assume that the operands were ordered. */
4723 comparison_result (enum rtx_code code
, int known_results
)
4729 return (known_results
& CMP_EQ
) ? const_true_rtx
: const0_rtx
;
4732 return (known_results
& CMP_EQ
) ? const0_rtx
: const_true_rtx
;
4736 return (known_results
& CMP_LT
) ? const_true_rtx
: const0_rtx
;
4739 return (known_results
& CMP_LT
) ? const0_rtx
: const_true_rtx
;
4743 return (known_results
& CMP_GT
) ? const_true_rtx
: const0_rtx
;
4746 return (known_results
& CMP_GT
) ? const0_rtx
: const_true_rtx
;
4749 return (known_results
& CMP_LTU
) ? const_true_rtx
: const0_rtx
;
4751 return (known_results
& CMP_LTU
) ? const0_rtx
: const_true_rtx
;
4754 return (known_results
& CMP_GTU
) ? const_true_rtx
: const0_rtx
;
4756 return (known_results
& CMP_GTU
) ? const0_rtx
: const_true_rtx
;
4759 return const_true_rtx
;
4767 /* Check if the given comparison (done in the given MODE) is actually a
4768 tautology or a contradiction.
4769 If no simplification is possible, this function returns zero.
4770 Otherwise, it returns either const_true_rtx or const0_rtx. */
4773 simplify_const_relational_operation (enum rtx_code code
,
4774 enum machine_mode mode
,
4781 gcc_assert (mode
!= VOIDmode
4782 || (GET_MODE (op0
) == VOIDmode
4783 && GET_MODE (op1
) == VOIDmode
));
4785 /* If op0 is a compare, extract the comparison arguments from it. */
4786 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
4788 op1
= XEXP (op0
, 1);
4789 op0
= XEXP (op0
, 0);
4791 if (GET_MODE (op0
) != VOIDmode
)
4792 mode
= GET_MODE (op0
);
4793 else if (GET_MODE (op1
) != VOIDmode
)
4794 mode
= GET_MODE (op1
);
4799 /* We can't simplify MODE_CC values since we don't know what the
4800 actual comparison is. */
4801 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
|| CC0_P (op0
))
4804 /* Make sure the constant is second. */
4805 if (swap_commutative_operands_p (op0
, op1
))
4807 tem
= op0
, op0
= op1
, op1
= tem
;
4808 code
= swap_condition (code
);
4811 trueop0
= avoid_constant_pool_reference (op0
);
4812 trueop1
= avoid_constant_pool_reference (op1
);
4814 /* For integer comparisons of A and B maybe we can simplify A - B and can
4815 then simplify a comparison of that with zero. If A and B are both either
4816 a register or a CONST_INT, this can't help; testing for these cases will
4817 prevent infinite recursion here and speed things up.
4819 We can only do this for EQ and NE comparisons as otherwise we may
4820 lose or introduce overflow which we cannot disregard as undefined as
4821 we do not know the signedness of the operation on either the left or
4822 the right hand side of the comparison. */
4824 if (INTEGRAL_MODE_P (mode
) && trueop1
!= const0_rtx
4825 && (code
== EQ
|| code
== NE
)
4826 && ! ((REG_P (op0
) || CONST_INT_P (trueop0
))
4827 && (REG_P (op1
) || CONST_INT_P (trueop1
)))
4828 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
4829 /* We cannot do this if tem is a nonzero address. */
4830 && ! nonzero_address_p (tem
))
4831 return simplify_const_relational_operation (signed_condition (code
),
4832 mode
, tem
, const0_rtx
);
4834 if (! HONOR_NANS (mode
) && code
== ORDERED
)
4835 return const_true_rtx
;
4837 if (! HONOR_NANS (mode
) && code
== UNORDERED
)
4840 /* For modes without NaNs, if the two operands are equal, we know the
4841 result except if they have side-effects. Even with NaNs we know
4842 the result of unordered comparisons and, if signaling NaNs are
4843 irrelevant, also the result of LT/GT/LTGT. */
4844 if ((! HONOR_NANS (GET_MODE (trueop0
))
4845 || code
== UNEQ
|| code
== UNLE
|| code
== UNGE
4846 || ((code
== LT
|| code
== GT
|| code
== LTGT
)
4847 && ! HONOR_SNANS (GET_MODE (trueop0
))))
4848 && rtx_equal_p (trueop0
, trueop1
)
4849 && ! side_effects_p (trueop0
))
4850 return comparison_result (code
, CMP_EQ
);
4852 /* If the operands are floating-point constants, see if we can fold
4854 if (CONST_DOUBLE_AS_FLOAT_P (trueop0
)
4855 && CONST_DOUBLE_AS_FLOAT_P (trueop1
)
4856 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0
)))
4858 REAL_VALUE_TYPE d0
, d1
;
4860 REAL_VALUE_FROM_CONST_DOUBLE (d0
, trueop0
);
4861 REAL_VALUE_FROM_CONST_DOUBLE (d1
, trueop1
);
4863 /* Comparisons are unordered iff at least one of the values is NaN. */
4864 if (REAL_VALUE_ISNAN (d0
) || REAL_VALUE_ISNAN (d1
))
4874 return const_true_rtx
;
4887 return comparison_result (code
,
4888 (REAL_VALUES_EQUAL (d0
, d1
) ? CMP_EQ
:
4889 REAL_VALUES_LESS (d0
, d1
) ? CMP_LT
: CMP_GT
));
4892 /* Otherwise, see if the operands are both integers. */
4893 if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
4894 && (CONST_DOUBLE_AS_INT_P (trueop0
) || CONST_INT_P (trueop0
))
4895 && (CONST_DOUBLE_AS_INT_P (trueop1
) || CONST_INT_P (trueop1
)))
4897 int width
= GET_MODE_PRECISION (mode
);
4898 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
4899 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
4901 /* Get the two words comprising each integer constant. */
4902 if (CONST_DOUBLE_AS_INT_P (trueop0
))
4904 l0u
= l0s
= CONST_DOUBLE_LOW (trueop0
);
4905 h0u
= h0s
= CONST_DOUBLE_HIGH (trueop0
);
4909 l0u
= l0s
= INTVAL (trueop0
);
4910 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
4913 if (CONST_DOUBLE_AS_INT_P (trueop1
))
4915 l1u
= l1s
= CONST_DOUBLE_LOW (trueop1
);
4916 h1u
= h1s
= CONST_DOUBLE_HIGH (trueop1
);
4920 l1u
= l1s
= INTVAL (trueop1
);
4921 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
4924 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4925 we have to sign or zero-extend the values. */
4926 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
4928 l0u
&= GET_MODE_MASK (mode
);
4929 l1u
&= GET_MODE_MASK (mode
);
4931 if (val_signbit_known_set_p (mode
, l0s
))
4932 l0s
|= ~GET_MODE_MASK (mode
);
4934 if (val_signbit_known_set_p (mode
, l1s
))
4935 l1s
|= ~GET_MODE_MASK (mode
);
4937 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
4938 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
4940 if (h0u
== h1u
&& l0u
== l1u
)
4941 return comparison_result (code
, CMP_EQ
);
4945 cr
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
)) ? CMP_LT
: CMP_GT
;
4946 cr
|= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
)) ? CMP_LTU
: CMP_GTU
;
4947 return comparison_result (code
, cr
);
4951 /* Optimize comparisons with upper and lower bounds. */
4952 if (HWI_COMPUTABLE_MODE_P (mode
)
4953 && CONST_INT_P (trueop1
))
4956 unsigned HOST_WIDE_INT nonzero
= nonzero_bits (trueop0
, mode
);
4957 HOST_WIDE_INT val
= INTVAL (trueop1
);
4958 HOST_WIDE_INT mmin
, mmax
;
4968 /* Get a reduced range if the sign bit is zero. */
4969 if (nonzero
<= (GET_MODE_MASK (mode
) >> 1))
4976 rtx mmin_rtx
, mmax_rtx
;
4977 get_mode_bounds (mode
, sign
, mode
, &mmin_rtx
, &mmax_rtx
);
4979 mmin
= INTVAL (mmin_rtx
);
4980 mmax
= INTVAL (mmax_rtx
);
4983 unsigned int sign_copies
= num_sign_bit_copies (trueop0
, mode
);
4985 mmin
>>= (sign_copies
- 1);
4986 mmax
>>= (sign_copies
- 1);
4992 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4994 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
4995 return const_true_rtx
;
4996 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5001 return const_true_rtx
;
5006 /* x <= y is always true for y >= mmax, always false for y < mmin. */
5008 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5009 return const_true_rtx
;
5010 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5015 return const_true_rtx
;
5021 /* x == y is always false for y out of range. */
5022 if (val
< mmin
|| val
> mmax
)
5026 /* x > y is always false for y >= mmax, always true for y < mmin. */
5028 if ((unsigned HOST_WIDE_INT
) val
>= (unsigned HOST_WIDE_INT
) mmax
)
5030 if ((unsigned HOST_WIDE_INT
) val
< (unsigned HOST_WIDE_INT
) mmin
)
5031 return const_true_rtx
;
5037 return const_true_rtx
;
5040 /* x < y is always false for y <= mmin, always true for y > mmax. */
5042 if ((unsigned HOST_WIDE_INT
) val
<= (unsigned HOST_WIDE_INT
) mmin
)
5044 if ((unsigned HOST_WIDE_INT
) val
> (unsigned HOST_WIDE_INT
) mmax
)
5045 return const_true_rtx
;
5051 return const_true_rtx
;
5055 /* x != y is always true for y out of range. */
5056 if (val
< mmin
|| val
> mmax
)
5057 return const_true_rtx
;
5065 /* Optimize integer comparisons with zero. */
5066 if (trueop1
== const0_rtx
)
5068 /* Some addresses are known to be nonzero. We don't know
5069 their sign, but equality comparisons are known. */
5070 if (nonzero_address_p (trueop0
))
5072 if (code
== EQ
|| code
== LEU
)
5074 if (code
== NE
|| code
== GTU
)
5075 return const_true_rtx
;
5078 /* See if the first operand is an IOR with a constant. If so, we
5079 may be able to determine the result of this comparison. */
5080 if (GET_CODE (op0
) == IOR
)
5082 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
5083 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
5085 int sign_bitnum
= GET_MODE_PRECISION (mode
) - 1;
5086 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
5087 && (UINTVAL (inner_const
)
5088 & ((unsigned HOST_WIDE_INT
) 1
5098 return const_true_rtx
;
5102 return const_true_rtx
;
5116 /* Optimize comparison of ABS with zero. */
5117 if (trueop1
== CONST0_RTX (mode
)
5118 && (GET_CODE (trueop0
) == ABS
5119 || (GET_CODE (trueop0
) == FLOAT_EXTEND
5120 && GET_CODE (XEXP (trueop0
, 0)) == ABS
)))
5125 /* Optimize abs(x) < 0.0. */
5126 if (!HONOR_SNANS (mode
)
5127 && (!INTEGRAL_MODE_P (mode
)
5128 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
5130 if (INTEGRAL_MODE_P (mode
)
5131 && (issue_strict_overflow_warning
5132 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
5133 warning (OPT_Wstrict_overflow
,
5134 ("assuming signed overflow does not occur when "
5135 "assuming abs (x) < 0 is false"));
5141 /* Optimize abs(x) >= 0.0. */
5142 if (!HONOR_NANS (mode
)
5143 && (!INTEGRAL_MODE_P (mode
)
5144 || (!flag_wrapv
&& !flag_trapv
&& flag_strict_overflow
)))
5146 if (INTEGRAL_MODE_P (mode
)
5147 && (issue_strict_overflow_warning
5148 (WARN_STRICT_OVERFLOW_CONDITIONAL
)))
5149 warning (OPT_Wstrict_overflow
,
5150 ("assuming signed overflow does not occur when "
5151 "assuming abs (x) >= 0 is true"));
5152 return const_true_rtx
;
5157 /* Optimize ! (abs(x) < 0.0). */
5158 return const_true_rtx
;
5168 /* Simplify CODE, an operation with result mode MODE and three operands,
5169 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
5170 a constant. Return 0 if no simplifications is possible. */
5173 simplify_ternary_operation (enum rtx_code code
, enum machine_mode mode
,
5174 enum machine_mode op0_mode
, rtx op0
, rtx op1
,
5177 unsigned int width
= GET_MODE_PRECISION (mode
);
5178 bool any_change
= false;
5181 /* VOIDmode means "infinite" precision. */
5183 width
= HOST_BITS_PER_WIDE_INT
;
5188 /* Simplify negations around the multiplication. */
5189 /* -a * -b + c => a * b + c. */
5190 if (GET_CODE (op0
) == NEG
)
5192 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
5194 op1
= tem
, op0
= XEXP (op0
, 0), any_change
= true;
5196 else if (GET_CODE (op1
) == NEG
)
5198 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
5200 op0
= tem
, op1
= XEXP (op1
, 0), any_change
= true;
5203 /* Canonicalize the two multiplication operands. */
5204 /* a * -b + c => -b * a + c. */
5205 if (swap_commutative_operands_p (op0
, op1
))
5206 tem
= op0
, op0
= op1
, op1
= tem
, any_change
= true;
5209 return gen_rtx_FMA (mode
, op0
, op1
, op2
);
5214 if (CONST_INT_P (op0
)
5215 && CONST_INT_P (op1
)
5216 && CONST_INT_P (op2
)
5217 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
5218 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
5220 /* Extracting a bit-field from a constant */
5221 unsigned HOST_WIDE_INT val
= UINTVAL (op0
);
5222 HOST_WIDE_INT op1val
= INTVAL (op1
);
5223 HOST_WIDE_INT op2val
= INTVAL (op2
);
5224 if (BITS_BIG_ENDIAN
)
5225 val
>>= GET_MODE_PRECISION (op0_mode
) - op2val
- op1val
;
5229 if (HOST_BITS_PER_WIDE_INT
!= op1val
)
5231 /* First zero-extend. */
5232 val
&= ((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1;
5233 /* If desired, propagate sign bit. */
5234 if (code
== SIGN_EXTRACT
5235 && (val
& ((unsigned HOST_WIDE_INT
) 1 << (op1val
- 1)))
5237 val
|= ~ (((unsigned HOST_WIDE_INT
) 1 << op1val
) - 1);
5240 return gen_int_mode (val
, mode
);
5245 if (CONST_INT_P (op0
))
5246 return op0
!= const0_rtx
? op1
: op2
;
5248 /* Convert c ? a : a into "a". */
5249 if (rtx_equal_p (op1
, op2
) && ! side_effects_p (op0
))
5252 /* Convert a != b ? a : b into "a". */
5253 if (GET_CODE (op0
) == NE
5254 && ! side_effects_p (op0
)
5255 && ! HONOR_NANS (mode
)
5256 && ! HONOR_SIGNED_ZEROS (mode
)
5257 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5258 && rtx_equal_p (XEXP (op0
, 1), op2
))
5259 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5260 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5263 /* Convert a == b ? a : b into "b". */
5264 if (GET_CODE (op0
) == EQ
5265 && ! side_effects_p (op0
)
5266 && ! HONOR_NANS (mode
)
5267 && ! HONOR_SIGNED_ZEROS (mode
)
5268 && ((rtx_equal_p (XEXP (op0
, 0), op1
)
5269 && rtx_equal_p (XEXP (op0
, 1), op2
))
5270 || (rtx_equal_p (XEXP (op0
, 0), op2
)
5271 && rtx_equal_p (XEXP (op0
, 1), op1
))))
5274 if (COMPARISON_P (op0
) && ! side_effects_p (op0
))
5276 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
5277 ? GET_MODE (XEXP (op0
, 1))
5278 : GET_MODE (XEXP (op0
, 0)));
5281 /* Look for happy constants in op1 and op2. */
5282 if (CONST_INT_P (op1
) && CONST_INT_P (op2
))
5284 HOST_WIDE_INT t
= INTVAL (op1
);
5285 HOST_WIDE_INT f
= INTVAL (op2
);
5287 if (t
== STORE_FLAG_VALUE
&& f
== 0)
5288 code
= GET_CODE (op0
);
5289 else if (t
== 0 && f
== STORE_FLAG_VALUE
)
5292 tmp
= reversed_comparison_code (op0
, NULL_RTX
);
5300 return simplify_gen_relational (code
, mode
, cmp_mode
,
5301 XEXP (op0
, 0), XEXP (op0
, 1));
5304 if (cmp_mode
== VOIDmode
)
5305 cmp_mode
= op0_mode
;
5306 temp
= simplify_relational_operation (GET_CODE (op0
), op0_mode
,
5307 cmp_mode
, XEXP (op0
, 0),
5310 /* See if any simplifications were possible. */
5313 if (CONST_INT_P (temp
))
5314 return temp
== const0_rtx
? op2
: op1
;
5316 return gen_rtx_IF_THEN_ELSE (mode
, temp
, op1
, op2
);
5322 gcc_assert (GET_MODE (op0
) == mode
);
5323 gcc_assert (GET_MODE (op1
) == mode
);
5324 gcc_assert (VECTOR_MODE_P (mode
));
5325 op2
= avoid_constant_pool_reference (op2
);
5326 if (CONST_INT_P (op2
))
5328 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
5329 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
5330 int mask
= (1 << n_elts
) - 1;
5332 if (!(INTVAL (op2
) & mask
))
5334 if ((INTVAL (op2
) & mask
) == mask
)
5337 op0
= avoid_constant_pool_reference (op0
);
5338 op1
= avoid_constant_pool_reference (op1
);
5339 if (GET_CODE (op0
) == CONST_VECTOR
5340 && GET_CODE (op1
) == CONST_VECTOR
)
5342 rtvec v
= rtvec_alloc (n_elts
);
5345 for (i
= 0; i
< n_elts
; i
++)
5346 RTVEC_ELT (v
, i
) = (INTVAL (op2
) & (1 << i
)
5347 ? CONST_VECTOR_ELT (op0
, i
)
5348 : CONST_VECTOR_ELT (op1
, i
));
5349 return gen_rtx_CONST_VECTOR (mode
, v
);
5361 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
5363 returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
5365 Works by unpacking OP into a collection of 8-bit values
5366 represented as a little-endian array of 'unsigned char', selecting by BYTE,
5367 and then repacking them again for OUTERMODE. */
5370 simplify_immed_subreg (enum machine_mode outermode
, rtx op
,
5371 enum machine_mode innermode
, unsigned int byte
)
5373 /* We support up to 512-bit values (for V8DFmode). */
5377 value_mask
= (1 << value_bit
) - 1
5379 unsigned char value
[max_bitsize
/ value_bit
];
5388 rtvec result_v
= NULL
;
5389 enum mode_class outer_class
;
5390 enum machine_mode outer_submode
;
5392 /* Some ports misuse CCmode. */
5393 if (GET_MODE_CLASS (outermode
) == MODE_CC
&& CONST_INT_P (op
))
5396 /* We have no way to represent a complex constant at the rtl level. */
5397 if (COMPLEX_MODE_P (outermode
))
5400 /* Unpack the value. */
5402 if (GET_CODE (op
) == CONST_VECTOR
)
5404 num_elem
= CONST_VECTOR_NUNITS (op
);
5405 elems
= &CONST_VECTOR_ELT (op
, 0);
5406 elem_bitsize
= GET_MODE_BITSIZE (GET_MODE_INNER (innermode
));
5412 elem_bitsize
= max_bitsize
;
5414 /* If this asserts, it is too complicated; reducing value_bit may help. */
5415 gcc_assert (BITS_PER_UNIT
% value_bit
== 0);
5416 /* I don't know how to handle endianness of sub-units. */
5417 gcc_assert (elem_bitsize
% BITS_PER_UNIT
== 0);
5419 for (elem
= 0; elem
< num_elem
; elem
++)
5422 rtx el
= elems
[elem
];
5424 /* Vectors are kept in target memory order. (This is probably
5427 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5428 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5430 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5431 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5432 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5433 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5434 vp
= value
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5437 switch (GET_CODE (el
))
5441 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5443 *vp
++ = INTVAL (el
) >> i
;
5444 /* CONST_INTs are always logically sign-extended. */
5445 for (; i
< elem_bitsize
; i
+= value_bit
)
5446 *vp
++ = INTVAL (el
) < 0 ? -1 : 0;
5450 if (GET_MODE (el
) == VOIDmode
)
5452 unsigned char extend
= 0;
5453 /* If this triggers, someone should have generated a
5454 CONST_INT instead. */
5455 gcc_assert (elem_bitsize
> HOST_BITS_PER_WIDE_INT
);
5457 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5458 *vp
++ = CONST_DOUBLE_LOW (el
) >> i
;
5459 while (i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
)
5462 = CONST_DOUBLE_HIGH (el
) >> (i
- HOST_BITS_PER_WIDE_INT
);
5466 if (CONST_DOUBLE_HIGH (el
) >> (HOST_BITS_PER_WIDE_INT
- 1))
5468 for (; i
< elem_bitsize
; i
+= value_bit
)
5473 long tmp
[max_bitsize
/ 32];
5474 int bitsize
= GET_MODE_BITSIZE (GET_MODE (el
));
5476 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el
)));
5477 gcc_assert (bitsize
<= elem_bitsize
);
5478 gcc_assert (bitsize
% value_bit
== 0);
5480 real_to_target (tmp
, CONST_DOUBLE_REAL_VALUE (el
),
5483 /* real_to_target produces its result in words affected by
5484 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5485 and use WORDS_BIG_ENDIAN instead; see the documentation
5486 of SUBREG in rtl.texi. */
5487 for (i
= 0; i
< bitsize
; i
+= value_bit
)
5490 if (WORDS_BIG_ENDIAN
)
5491 ibase
= bitsize
- 1 - i
;
5494 *vp
++ = tmp
[ibase
/ 32] >> i
% 32;
5497 /* It shouldn't matter what's done here, so fill it with
5499 for (; i
< elem_bitsize
; i
+= value_bit
)
5505 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5507 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5508 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5512 for (i
= 0; i
< HOST_BITS_PER_WIDE_INT
; i
+= value_bit
)
5513 *vp
++ = CONST_FIXED_VALUE_LOW (el
) >> i
;
5514 for (; i
< HOST_BITS_PER_DOUBLE_INT
&& i
< elem_bitsize
;
5516 *vp
++ = CONST_FIXED_VALUE_HIGH (el
)
5517 >> (i
- HOST_BITS_PER_WIDE_INT
);
5518 for (; i
< elem_bitsize
; i
+= value_bit
)
5528 /* Now, pick the right byte to start with. */
5529 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5530 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5531 will already have offset 0. */
5532 if (GET_MODE_SIZE (innermode
) >= GET_MODE_SIZE (outermode
))
5534 unsigned ibyte
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
)
5536 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5537 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5538 byte
= (subword_byte
% UNITS_PER_WORD
5539 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5542 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5543 so if it's become negative it will instead be very large.) */
5544 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5546 /* Convert from bytes to chunks of size value_bit. */
5547 value_start
= byte
* (BITS_PER_UNIT
/ value_bit
);
5549 /* Re-pack the value. */
5551 if (VECTOR_MODE_P (outermode
))
5553 num_elem
= GET_MODE_NUNITS (outermode
);
5554 result_v
= rtvec_alloc (num_elem
);
5555 elems
= &RTVEC_ELT (result_v
, 0);
5556 outer_submode
= GET_MODE_INNER (outermode
);
5562 outer_submode
= outermode
;
5565 outer_class
= GET_MODE_CLASS (outer_submode
);
5566 elem_bitsize
= GET_MODE_BITSIZE (outer_submode
);
5568 gcc_assert (elem_bitsize
% value_bit
== 0);
5569 gcc_assert (elem_bitsize
+ value_start
* value_bit
<= max_bitsize
);
5571 for (elem
= 0; elem
< num_elem
; elem
++)
5575 /* Vectors are stored in target memory order. (This is probably
5578 unsigned byte
= (elem
* elem_bitsize
) / BITS_PER_UNIT
;
5579 unsigned ibyte
= (((num_elem
- 1 - elem
) * elem_bitsize
)
5581 unsigned word_byte
= WORDS_BIG_ENDIAN
? ibyte
: byte
;
5582 unsigned subword_byte
= BYTES_BIG_ENDIAN
? ibyte
: byte
;
5583 unsigned bytele
= (subword_byte
% UNITS_PER_WORD
5584 + (word_byte
/ UNITS_PER_WORD
) * UNITS_PER_WORD
);
5585 vp
= value
+ value_start
+ (bytele
* BITS_PER_UNIT
) / value_bit
;
5588 switch (outer_class
)
5591 case MODE_PARTIAL_INT
:
5593 unsigned HOST_WIDE_INT hi
= 0, lo
= 0;
5596 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5598 lo
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5599 for (; i
< elem_bitsize
; i
+= value_bit
)
5600 hi
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5601 << (i
- HOST_BITS_PER_WIDE_INT
);
5603 /* immed_double_const doesn't call trunc_int_for_mode. I don't
5605 if (elem_bitsize
<= HOST_BITS_PER_WIDE_INT
)
5606 elems
[elem
] = gen_int_mode (lo
, outer_submode
);
5607 else if (elem_bitsize
<= HOST_BITS_PER_DOUBLE_INT
)
5608 elems
[elem
] = immed_double_const (lo
, hi
, outer_submode
);
5615 case MODE_DECIMAL_FLOAT
:
5618 long tmp
[max_bitsize
/ 32];
5620 /* real_from_target wants its input in words affected by
5621 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5622 and use WORDS_BIG_ENDIAN instead; see the documentation
5623 of SUBREG in rtl.texi. */
5624 for (i
= 0; i
< max_bitsize
/ 32; i
++)
5626 for (i
= 0; i
< elem_bitsize
; i
+= value_bit
)
5629 if (WORDS_BIG_ENDIAN
)
5630 ibase
= elem_bitsize
- 1 - i
;
5633 tmp
[ibase
/ 32] |= (*vp
++ & value_mask
) << i
% 32;
5636 real_from_target (&r
, tmp
, outer_submode
);
5637 elems
[elem
] = CONST_DOUBLE_FROM_REAL_VALUE (r
, outer_submode
);
5649 f
.mode
= outer_submode
;
5652 i
< HOST_BITS_PER_WIDE_INT
&& i
< elem_bitsize
;
5654 f
.data
.low
|= (unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
) << i
;
5655 for (; i
< elem_bitsize
; i
+= value_bit
)
5656 f
.data
.high
|= ((unsigned HOST_WIDE_INT
)(*vp
++ & value_mask
)
5657 << (i
- HOST_BITS_PER_WIDE_INT
));
5659 elems
[elem
] = CONST_FIXED_FROM_FIXED_VALUE (f
, outer_submode
);
5667 if (VECTOR_MODE_P (outermode
))
5668 return gen_rtx_CONST_VECTOR (outermode
, result_v
);
5673 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5674 Return 0 if no simplifications are possible. */
5676 simplify_subreg (enum machine_mode outermode
, rtx op
,
5677 enum machine_mode innermode
, unsigned int byte
)
5679 /* Little bit of sanity checking. */
5680 gcc_assert (innermode
!= VOIDmode
);
5681 gcc_assert (outermode
!= VOIDmode
);
5682 gcc_assert (innermode
!= BLKmode
);
5683 gcc_assert (outermode
!= BLKmode
);
5685 gcc_assert (GET_MODE (op
) == innermode
5686 || GET_MODE (op
) == VOIDmode
);
5688 gcc_assert ((byte
% GET_MODE_SIZE (outermode
)) == 0);
5689 gcc_assert (byte
< GET_MODE_SIZE (innermode
));
5691 if (outermode
== innermode
&& !byte
)
5694 if (CONST_INT_P (op
)
5695 || CONST_DOUBLE_P (op
)
5696 || GET_CODE (op
) == CONST_FIXED
5697 || GET_CODE (op
) == CONST_VECTOR
)
5698 return simplify_immed_subreg (outermode
, op
, innermode
, byte
);
5700 /* Changing mode twice with SUBREG => just change it once,
5701 or not at all if changing back op starting mode. */
5702 if (GET_CODE (op
) == SUBREG
)
5704 enum machine_mode innermostmode
= GET_MODE (SUBREG_REG (op
));
5705 int final_offset
= byte
+ SUBREG_BYTE (op
);
5708 if (outermode
== innermostmode
5709 && byte
== 0 && SUBREG_BYTE (op
) == 0)
5710 return SUBREG_REG (op
);
5712 /* The SUBREG_BYTE represents offset, as if the value were stored
5713 in memory. Irritating exception is paradoxical subreg, where
5714 we define SUBREG_BYTE to be 0. On big endian machines, this
5715 value should be negative. For a moment, undo this exception. */
5716 if (byte
== 0 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5718 int difference
= (GET_MODE_SIZE (innermode
) - GET_MODE_SIZE (outermode
));
5719 if (WORDS_BIG_ENDIAN
)
5720 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5721 if (BYTES_BIG_ENDIAN
)
5722 final_offset
+= difference
% UNITS_PER_WORD
;
5724 if (SUBREG_BYTE (op
) == 0
5725 && GET_MODE_SIZE (innermostmode
) < GET_MODE_SIZE (innermode
))
5727 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (innermode
));
5728 if (WORDS_BIG_ENDIAN
)
5729 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5730 if (BYTES_BIG_ENDIAN
)
5731 final_offset
+= difference
% UNITS_PER_WORD
;
5734 /* See whether resulting subreg will be paradoxical. */
5735 if (GET_MODE_SIZE (innermostmode
) > GET_MODE_SIZE (outermode
))
5737 /* In nonparadoxical subregs we can't handle negative offsets. */
5738 if (final_offset
< 0)
5740 /* Bail out in case resulting subreg would be incorrect. */
5741 if (final_offset
% GET_MODE_SIZE (outermode
)
5742 || (unsigned) final_offset
>= GET_MODE_SIZE (innermostmode
))
5748 int difference
= (GET_MODE_SIZE (innermostmode
) - GET_MODE_SIZE (outermode
));
5750 /* In paradoxical subreg, see if we are still looking on lower part.
5751 If so, our SUBREG_BYTE will be 0. */
5752 if (WORDS_BIG_ENDIAN
)
5753 offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5754 if (BYTES_BIG_ENDIAN
)
5755 offset
+= difference
% UNITS_PER_WORD
;
5756 if (offset
== final_offset
)
5762 /* Recurse for further possible simplifications. */
5763 newx
= simplify_subreg (outermode
, SUBREG_REG (op
), innermostmode
,
5767 if (validate_subreg (outermode
, innermostmode
,
5768 SUBREG_REG (op
), final_offset
))
5770 newx
= gen_rtx_SUBREG (outermode
, SUBREG_REG (op
), final_offset
);
5771 if (SUBREG_PROMOTED_VAR_P (op
)
5772 && SUBREG_PROMOTED_UNSIGNED_P (op
) >= 0
5773 && GET_MODE_CLASS (outermode
) == MODE_INT
5774 && IN_RANGE (GET_MODE_SIZE (outermode
),
5775 GET_MODE_SIZE (innermode
),
5776 GET_MODE_SIZE (innermostmode
))
5777 && subreg_lowpart_p (newx
))
5779 SUBREG_PROMOTED_VAR_P (newx
) = 1;
5780 SUBREG_PROMOTED_UNSIGNED_SET
5781 (newx
, SUBREG_PROMOTED_UNSIGNED_P (op
));
5788 /* SUBREG of a hard register => just change the register number
5789 and/or mode. If the hard register is not valid in that mode,
5790 suppress this simplification. If the hard register is the stack,
5791 frame, or argument pointer, leave this as a SUBREG. */
5793 if (REG_P (op
) && HARD_REGISTER_P (op
))
5795 unsigned int regno
, final_regno
;
5798 final_regno
= simplify_subreg_regno (regno
, innermode
, byte
, outermode
);
5799 if (HARD_REGISTER_NUM_P (final_regno
))
5802 int final_offset
= byte
;
5804 /* Adjust offset for paradoxical subregs. */
5806 && GET_MODE_SIZE (innermode
) < GET_MODE_SIZE (outermode
))
5808 int difference
= (GET_MODE_SIZE (innermode
)
5809 - GET_MODE_SIZE (outermode
));
5810 if (WORDS_BIG_ENDIAN
)
5811 final_offset
+= (difference
/ UNITS_PER_WORD
) * UNITS_PER_WORD
;
5812 if (BYTES_BIG_ENDIAN
)
5813 final_offset
+= difference
% UNITS_PER_WORD
;
5816 x
= gen_rtx_REG_offset (op
, outermode
, final_regno
, final_offset
);
5818 /* Propagate original regno. We don't have any way to specify
5819 the offset inside original regno, so do so only for lowpart.
5820 The information is used only by alias analysis that can not
5821 grok partial register anyway. */
5823 if (subreg_lowpart_offset (outermode
, innermode
) == byte
)
5824 ORIGINAL_REGNO (x
) = ORIGINAL_REGNO (op
);
5829 /* If we have a SUBREG of a register that we are replacing and we are
5830 replacing it with a MEM, make a new MEM and try replacing the
5831 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5832 or if we would be widening it. */
5835 && ! mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
))
5836 /* Allow splitting of volatile memory references in case we don't
5837 have instruction to move the whole thing. */
5838 && (! MEM_VOLATILE_P (op
)
5839 || ! have_insn_for (SET
, innermode
))
5840 && GET_MODE_SIZE (outermode
) <= GET_MODE_SIZE (GET_MODE (op
)))
5841 return adjust_address_nv (op
, outermode
, byte
);
5843 /* Handle complex values represented as CONCAT
5844 of real and imaginary part. */
5845 if (GET_CODE (op
) == CONCAT
)
5847 unsigned int part_size
, final_offset
;
5850 part_size
= GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op
, 0)));
5851 if (byte
< part_size
)
5853 part
= XEXP (op
, 0);
5854 final_offset
= byte
;
5858 part
= XEXP (op
, 1);
5859 final_offset
= byte
- part_size
;
5862 if (final_offset
+ GET_MODE_SIZE (outermode
) > part_size
)
5865 res
= simplify_subreg (outermode
, part
, GET_MODE (part
), final_offset
);
5868 if (validate_subreg (outermode
, GET_MODE (part
), part
, final_offset
))
5869 return gen_rtx_SUBREG (outermode
, part
, final_offset
);
5873 /* A SUBREG resulting from a zero extension may fold to zero if
5874 it extracts higher bits that the ZERO_EXTEND's source bits. */
5875 if (GET_CODE (op
) == ZERO_EXTEND
)
5877 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
5878 if (bitpos
>= GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0))))
5879 return CONST0_RTX (outermode
);
5882 if (SCALAR_INT_MODE_P (outermode
)
5883 && SCALAR_INT_MODE_P (innermode
)
5884 && GET_MODE_PRECISION (outermode
) < GET_MODE_PRECISION (innermode
)
5885 && byte
== subreg_lowpart_offset (outermode
, innermode
))
5887 rtx tem
= simplify_truncation (outermode
, op
, innermode
);
5895 /* Make a SUBREG operation or equivalent if it folds. */
/* Try to fold (subreg:OUTERMODE OP BYTE) and, failing that, build the
   SUBREG rtx explicitly when the target's rules permit it.
   NOTE(review): this extraction is missing interior lines (the fused
   original line numbers jump, e.g. 5899 -> 5903 and 5909 -> 5912), so
   the comments below describe only what the visible tokens establish.  */
5898 simplify_gen_subreg (enum machine_mode outermode
, rtx op
,
5899 enum machine_mode innermode
, unsigned int byte
)
/* First ask simplify_subreg whether the subreg folds to something simpler.  */
5903 newx
= simplify_subreg (outermode
, op
, innermode
, byte
);
/* A nested SUBREG, a CONCAT, or an operand with VOIDmode cannot be wrapped
   in a fresh SUBREG -- presumably this path gives up; the statement taken
   on this condition is in the lines not visible here (gap 5909 -> 5912).  */
5907 if (GET_CODE (op
) == SUBREG
5908 || GET_CODE (op
) == CONCAT
5909 || GET_MODE (op
) == VOIDmode
)
/* Otherwise construct the SUBREG directly, but only if validate_subreg
   accepts this (outermode, innermode, byte) combination for OP.  */
5912 if (validate_subreg (outermode
, innermode
, op
, byte
))
5913 return gen_rtx_SUBREG (outermode
, op
, byte
);
5918 /* Simplify X, an rtx expression.
5920 Return the simplified expression or NULL if no simplifications
5923 This is the preferred entry point into the simplification routines;
5924 however, we still allow passes to call the more specific routines.
5926 Right now GCC has three (yes, three) major bodies of RTL simplification
5927 code that need to be unified.
5929 1. fold_rtx in cse.c. This code uses various CSE specific
5930 information to aid in RTL simplification.
5932 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5933 it uses combine specific information to aid in RTL
5936 3. The routines in this file.
5939 Long term we want to only have one body of simplification code; to
5940 get to that state I recommend the following steps:
5942 1. Pour over fold_rtx & simplify_rtx and move any simplifications
5943 which are not pass dependent state into these routines.
5945 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5946 use this routine whenever possible.
5948 3. Allow for pass dependent state to be provided to these
5949 routines and add simplifications based on the pass dependent
5950 state. Remove code from cse.c & combine.c that becomes
5953 It will take time, but ultimately the compiler will be easier to
5954 maintain and improve. It's totally silly that when we add a
5955 simplification that it needs to be added to 4 places (3 for RTL
5956 simplification and 1 for tree simplification). */
5959 simplify_rtx (const_rtx x
)
5961 const enum rtx_code code
= GET_CODE (x
);
5962 const enum machine_mode mode
= GET_MODE (x
);
5964 switch (GET_RTX_CLASS (code
))
5967 return simplify_unary_operation (code
, mode
,
5968 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
5969 case RTX_COMM_ARITH
:
5970 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
5971 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
5973 /* Fall through.... */
5976 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
5979 case RTX_BITFIELD_OPS
:
5980 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
5981 XEXP (x
, 0), XEXP (x
, 1),
5985 case RTX_COMM_COMPARE
:
5986 return simplify_relational_operation (code
, mode
,
5987 ((GET_MODE (XEXP (x
, 0))
5989 ? GET_MODE (XEXP (x
, 0))
5990 : GET_MODE (XEXP (x
, 1))),
5996 return simplify_subreg (mode
, SUBREG_REG (x
),
5997 GET_MODE (SUBREG_REG (x
)),
6004 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6005 if (GET_CODE (XEXP (x
, 0)) == HIGH
6006 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))