1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987-2013 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
29 #include "hard-reg-set.h"
31 #include "insn-config.h"
35 #include "diagnostic-core.h"
39 /* Simplification and canonicalization of RTL. */
41 /* Much code operates on (low, high) pairs; the low value is an
42 unsigned wide int, the high value a signed wide int. We
43 occasionally need to sign extend from low to high as if low were a
45 #define HWI_SIGN_EXTEND(low) \
46 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
48 static rtx
neg_const_int (enum machine_mode
, const_rtx
);
49 static bool plus_minus_operand_p (const_rtx
);
50 static bool simplify_plus_minus_op_data_cmp (rtx
, rtx
);
51 static rtx
simplify_plus_minus (enum rtx_code
, enum machine_mode
, rtx
, rtx
);
52 static rtx
simplify_immed_subreg (enum machine_mode
, rtx
, enum machine_mode
,
54 static rtx
simplify_associative_operation (enum rtx_code
, enum machine_mode
,
56 static rtx
simplify_relational_operation_1 (enum rtx_code
, enum machine_mode
,
57 enum machine_mode
, rtx
, rtx
);
58 static rtx
simplify_unary_operation_1 (enum rtx_code
, enum machine_mode
, rtx
);
59 static rtx
simplify_binary_operation_1 (enum rtx_code
, enum machine_mode
,
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
65 neg_const_int (enum machine_mode mode
, const_rtx i
)
67 return gen_int_mode (-(unsigned HOST_WIDE_INT
) INTVAL (i
), mode
);
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
74 mode_signbit_p (enum machine_mode mode
, const_rtx x
)
76 unsigned HOST_WIDE_INT val
;
79 if (GET_MODE_CLASS (mode
) != MODE_INT
)
82 width
= GET_MODE_PRECISION (mode
);
86 if (width
<= HOST_BITS_PER_WIDE_INT
89 else if (width
<= HOST_BITS_PER_DOUBLE_INT
90 && CONST_DOUBLE_AS_INT_P (x
)
91 && CONST_DOUBLE_LOW (x
) == 0)
93 val
= CONST_DOUBLE_HIGH (x
);
94 width
-= HOST_BITS_PER_WIDE_INT
;
97 /* FIXME: We don't yet have a representation for wider modes. */
100 if (width
< HOST_BITS_PER_WIDE_INT
)
101 val
&= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
102 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
105 /* Test whether VAL is equal to the most significant bit of mode MODE
106 (after masking with the mode mask of MODE). Returns false if the
107 precision of MODE is too large to handle. */
110 val_signbit_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
114 if (GET_MODE_CLASS (mode
) != MODE_INT
)
117 width
= GET_MODE_PRECISION (mode
);
118 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
121 val
&= GET_MODE_MASK (mode
);
122 return val
== ((unsigned HOST_WIDE_INT
) 1 << (width
- 1));
125 /* Test whether the most significant bit of mode MODE is set in VAL.
126 Returns false if the precision of MODE is too large to handle. */
128 val_signbit_known_set_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
132 if (GET_MODE_CLASS (mode
) != MODE_INT
)
135 width
= GET_MODE_PRECISION (mode
);
136 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
139 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
143 /* Test whether the most significant bit of mode MODE is clear in VAL.
144 Returns false if the precision of MODE is too large to handle. */
146 val_signbit_known_clear_p (enum machine_mode mode
, unsigned HOST_WIDE_INT val
)
150 if (GET_MODE_CLASS (mode
) != MODE_INT
)
153 width
= GET_MODE_PRECISION (mode
);
154 if (width
== 0 || width
> HOST_BITS_PER_WIDE_INT
)
157 val
&= (unsigned HOST_WIDE_INT
) 1 << (width
- 1);
161 /* Make a binary operation by properly ordering the operands and
162 seeing if the expression folds. */
165 simplify_gen_binary (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
170 /* If this simplifies, do it. */
171 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
175 /* Put complex operands first and constants second if commutative. */
176 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
177 && swap_commutative_operands_p (op0
, op1
))
178 tem
= op0
, op0
= op1
, op1
= tem
;
180 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
183 /* If X is a MEM referencing the constant pool, return the real value.
184 Otherwise return X. */
186 avoid_constant_pool_reference (rtx x
)
189 enum machine_mode cmode
;
190 HOST_WIDE_INT offset
= 0;
192 switch (GET_CODE (x
))
198 /* Handle float extensions of constant pool references. */
200 c
= avoid_constant_pool_reference (tmp
);
201 if (c
!= tmp
&& CONST_DOUBLE_AS_FLOAT_P (c
))
205 REAL_VALUE_FROM_CONST_DOUBLE (d
, c
);
206 return CONST_DOUBLE_FROM_REAL_VALUE (d
, GET_MODE (x
));
214 if (GET_MODE (x
) == BLKmode
)
219 /* Call target hook to avoid the effects of -fpic etc.... */
220 addr
= targetm
.delegitimize_address (addr
);
222 /* Split the address into a base and integer offset. */
223 if (GET_CODE (addr
) == CONST
224 && GET_CODE (XEXP (addr
, 0)) == PLUS
225 && CONST_INT_P (XEXP (XEXP (addr
, 0), 1)))
227 offset
= INTVAL (XEXP (XEXP (addr
, 0), 1));
228 addr
= XEXP (XEXP (addr
, 0), 0);
231 if (GET_CODE (addr
) == LO_SUM
)
232 addr
= XEXP (addr
, 1);
234 /* If this is a constant pool reference, we can turn it into its
235 constant and hope that simplifications happen. */
236 if (GET_CODE (addr
) == SYMBOL_REF
237 && CONSTANT_POOL_ADDRESS_P (addr
))
239 c
= get_pool_constant (addr
);
240 cmode
= get_pool_mode (addr
);
242 /* If we're accessing the constant in a different mode than it was
243 originally stored, attempt to fix that up via subreg simplifications.
244 If that fails we have no choice but to return the original memory. */
245 if (offset
!= 0 || cmode
!= GET_MODE (x
))
247 rtx tem
= simplify_subreg (GET_MODE (x
), c
, cmode
, offset
);
248 if (tem
&& CONSTANT_P (tem
))
258 /* Simplify a MEM based on its attributes. This is the default
259 delegitimize_address target hook, and it's recommended that every
260 overrider call it. */
263 delegitimize_mem_from_attrs (rtx x
)
265 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
266 use their base addresses as equivalent. */
269 && MEM_OFFSET_KNOWN_P (x
))
271 tree decl
= MEM_EXPR (x
);
272 enum machine_mode mode
= GET_MODE (x
);
273 HOST_WIDE_INT offset
= 0;
275 switch (TREE_CODE (decl
))
285 case ARRAY_RANGE_REF
:
290 case VIEW_CONVERT_EXPR
:
292 HOST_WIDE_INT bitsize
, bitpos
;
294 int unsignedp
, volatilep
= 0;
296 decl
= get_inner_reference (decl
, &bitsize
, &bitpos
, &toffset
,
297 &mode
, &unsignedp
, &volatilep
, false);
298 if (bitsize
!= GET_MODE_BITSIZE (mode
)
299 || (bitpos
% BITS_PER_UNIT
)
300 || (toffset
&& !host_integerp (toffset
, 0)))
304 offset
+= bitpos
/ BITS_PER_UNIT
;
306 offset
+= TREE_INT_CST_LOW (toffset
);
313 && mode
== GET_MODE (x
)
314 && TREE_CODE (decl
) == VAR_DECL
315 && (TREE_STATIC (decl
)
316 || DECL_THREAD_LOCAL_P (decl
))
317 && DECL_RTL_SET_P (decl
)
318 && MEM_P (DECL_RTL (decl
)))
322 offset
+= MEM_OFFSET (x
);
324 newx
= DECL_RTL (decl
);
328 rtx n
= XEXP (newx
, 0), o
= XEXP (x
, 0);
330 /* Avoid creating a new MEM needlessly if we already had
331 the same address. We do if there's no OFFSET and the
332 old address X is identical to NEWX, or if X is of the
333 form (plus NEWX OFFSET), or the NEWX is of the form
334 (plus Y (const_int Z)) and X is that with the offset
335 added: (plus Y (const_int Z+OFFSET)). */
337 || (GET_CODE (o
) == PLUS
338 && GET_CODE (XEXP (o
, 1)) == CONST_INT
339 && (offset
== INTVAL (XEXP (o
, 1))
340 || (GET_CODE (n
) == PLUS
341 && GET_CODE (XEXP (n
, 1)) == CONST_INT
342 && (INTVAL (XEXP (n
, 1)) + offset
343 == INTVAL (XEXP (o
, 1)))
344 && (n
= XEXP (n
, 0))))
345 && (o
= XEXP (o
, 0))))
346 && rtx_equal_p (o
, n
)))
347 x
= adjust_address_nv (newx
, mode
, offset
);
349 else if (GET_MODE (x
) == GET_MODE (newx
)
358 /* Make a unary operation by first seeing if it folds and otherwise making
359 the specified operation. */
362 simplify_gen_unary (enum rtx_code code
, enum machine_mode mode
, rtx op
,
363 enum machine_mode op_mode
)
367 /* If this simplifies, use it. */
368 if ((tem
= simplify_unary_operation (code
, mode
, op
, op_mode
)) != 0)
371 return gen_rtx_fmt_e (code
, mode
, op
);
374 /* Likewise for ternary operations. */
377 simplify_gen_ternary (enum rtx_code code
, enum machine_mode mode
,
378 enum machine_mode op0_mode
, rtx op0
, rtx op1
, rtx op2
)
382 /* If this simplifies, use it. */
383 if (0 != (tem
= simplify_ternary_operation (code
, mode
, op0_mode
,
387 return gen_rtx_fmt_eee (code
, mode
, op0
, op1
, op2
);
390 /* Likewise, for relational operations.
391 CMP_MODE specifies mode comparison is done in. */
394 simplify_gen_relational (enum rtx_code code
, enum machine_mode mode
,
395 enum machine_mode cmp_mode
, rtx op0
, rtx op1
)
399 if (0 != (tem
= simplify_relational_operation (code
, mode
, cmp_mode
,
403 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
406 /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
407 and simplify the result. If FN is non-NULL, call this callback on each
408 X, if it returns non-NULL, replace X with its return value and simplify the
412 simplify_replace_fn_rtx (rtx x
, const_rtx old_rtx
,
413 rtx (*fn
) (rtx
, const_rtx
, void *), void *data
)
415 enum rtx_code code
= GET_CODE (x
);
416 enum machine_mode mode
= GET_MODE (x
);
417 enum machine_mode op_mode
;
419 rtx op0
, op1
, op2
, newx
, op
;
423 if (__builtin_expect (fn
!= NULL
, 0))
425 newx
= fn (x
, old_rtx
, data
);
429 else if (rtx_equal_p (x
, old_rtx
))
430 return copy_rtx ((rtx
) data
);
432 switch (GET_RTX_CLASS (code
))
436 op_mode
= GET_MODE (op0
);
437 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
438 if (op0
== XEXP (x
, 0))
440 return simplify_gen_unary (code
, mode
, op0
, op_mode
);
444 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
445 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
446 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
448 return simplify_gen_binary (code
, mode
, op0
, op1
);
451 case RTX_COMM_COMPARE
:
454 op_mode
= GET_MODE (op0
) != VOIDmode
? GET_MODE (op0
) : GET_MODE (op1
);
455 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
456 op1
= simplify_replace_fn_rtx (op1
, old_rtx
, fn
, data
);
457 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
459 return simplify_gen_relational (code
, mode
, op_mode
, op0
, op1
);
462 case RTX_BITFIELD_OPS
:
464 op_mode
= GET_MODE (op0
);
465 op0
= simplify_replace_fn_rtx (op0
, old_rtx
, fn
, data
);
466 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
467 op2
= simplify_replace_fn_rtx (XEXP (x
, 2), old_rtx
, fn
, data
);
468 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1) && op2
== XEXP (x
, 2))
470 if (op_mode
== VOIDmode
)
471 op_mode
= GET_MODE (op0
);
472 return simplify_gen_ternary (code
, mode
, op_mode
, op0
, op1
, op2
);
477 op0
= simplify_replace_fn_rtx (SUBREG_REG (x
), old_rtx
, fn
, data
);
478 if (op0
== SUBREG_REG (x
))
480 op0
= simplify_gen_subreg (GET_MODE (x
), op0
,
481 GET_MODE (SUBREG_REG (x
)),
483 return op0
? op0
: x
;
490 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
491 if (op0
== XEXP (x
, 0))
493 return replace_equiv_address_nv (x
, op0
);
495 else if (code
== LO_SUM
)
497 op0
= simplify_replace_fn_rtx (XEXP (x
, 0), old_rtx
, fn
, data
);
498 op1
= simplify_replace_fn_rtx (XEXP (x
, 1), old_rtx
, fn
, data
);
500 /* (lo_sum (high x) x) -> x */
501 if (GET_CODE (op0
) == HIGH
&& rtx_equal_p (XEXP (op0
, 0), op1
))
504 if (op0
== XEXP (x
, 0) && op1
== XEXP (x
, 1))
506 return gen_rtx_LO_SUM (mode
, op0
, op1
);
515 fmt
= GET_RTX_FORMAT (code
);
516 for (i
= 0; fmt
[i
]; i
++)
521 newvec
= XVEC (newx
, i
);
522 for (j
= 0; j
< GET_NUM_ELEM (vec
); j
++)
524 op
= simplify_replace_fn_rtx (RTVEC_ELT (vec
, j
),
526 if (op
!= RTVEC_ELT (vec
, j
))
530 newvec
= shallow_copy_rtvec (vec
);
532 newx
= shallow_copy_rtx (x
);
533 XVEC (newx
, i
) = newvec
;
535 RTVEC_ELT (newvec
, j
) = op
;
543 op
= simplify_replace_fn_rtx (XEXP (x
, i
), old_rtx
, fn
, data
);
544 if (op
!= XEXP (x
, i
))
547 newx
= shallow_copy_rtx (x
);
556 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
557 resulting RTX. Return a new RTX which is as simplified as possible. */
560 simplify_replace_rtx (rtx x
, const_rtx old_rtx
, rtx new_rtx
)
562 return simplify_replace_fn_rtx (x
, old_rtx
, 0, new_rtx
);
565 /* Try to simplify a MODE truncation of OP, which has OP_MODE.
566 Only handle cases where the truncated value is inherently an rvalue.
568 RTL provides two ways of truncating a value:
570 1. a lowpart subreg. This form is only a truncation when both
571 the outer and inner modes (here MODE and OP_MODE respectively)
572 are scalar integers, and only then when the subreg is used as
575 It is only valid to form such truncating subregs if the
576 truncation requires no action by the target. The onus for
577 proving this is on the creator of the subreg -- e.g. the
578 caller to simplify_subreg or simplify_gen_subreg -- and typically
579 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
581 2. a TRUNCATE. This form handles both scalar and compound integers.
583 The first form is preferred where valid. However, the TRUNCATE
584 handling in simplify_unary_operation turns the second form into the
585 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
586 so it is generally safe to form rvalue truncations using:
588 simplify_gen_unary (TRUNCATE, ...)
590 and leave simplify_unary_operation to work out which representation
593 Because of the proof requirements on (1), simplify_truncation must
594 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
595 regardless of whether the outer truncation came from a SUBREG or a
596 TRUNCATE. For example, if the caller has proven that an SImode
601 is a no-op and can be represented as a subreg, it does not follow
602 that SImode truncations of X and Y are also no-ops. On a target
603 like 64-bit MIPS that requires SImode values to be stored in
604 sign-extended form, an SImode truncation of:
606 (and:DI (reg:DI X) (const_int 63))
608 is trivially a no-op because only the lower 6 bits can be set.
609 However, X is still an arbitrary 64-bit number and so we cannot
610 assume that truncating it too is a no-op. */
613 simplify_truncation (enum machine_mode mode
, rtx op
,
614 enum machine_mode op_mode
)
616 unsigned int precision
= GET_MODE_UNIT_PRECISION (mode
);
617 unsigned int op_precision
= GET_MODE_UNIT_PRECISION (op_mode
);
618 gcc_assert (precision
<= op_precision
);
620 /* Optimize truncations of zero and sign extended values. */
621 if (GET_CODE (op
) == ZERO_EXTEND
622 || GET_CODE (op
) == SIGN_EXTEND
)
624 /* There are three possibilities. If MODE is the same as the
625 origmode, we can omit both the extension and the subreg.
626 If MODE is not larger than the origmode, we can apply the
627 truncation without the extension. Finally, if the outermode
628 is larger than the origmode, we can just extend to the appropriate
630 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
631 if (mode
== origmode
)
633 else if (precision
<= GET_MODE_UNIT_PRECISION (origmode
))
634 return simplify_gen_unary (TRUNCATE
, mode
,
635 XEXP (op
, 0), origmode
);
637 return simplify_gen_unary (GET_CODE (op
), mode
,
638 XEXP (op
, 0), origmode
);
641 /* Simplify (truncate:SI (op:DI (x:DI) (y:DI)))
642 to (op:SI (truncate:SI (x:DI)) (truncate:SI (x:DI))). */
643 if (GET_CODE (op
) == PLUS
644 || GET_CODE (op
) == MINUS
645 || GET_CODE (op
) == MULT
)
647 rtx op0
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0), op_mode
);
650 rtx op1
= simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 1), op_mode
);
652 return simplify_gen_binary (GET_CODE (op
), mode
, op0
, op1
);
656 /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
657 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
658 the outer subreg is effectively a truncation to the original mode. */
659 if ((GET_CODE (op
) == LSHIFTRT
660 || GET_CODE (op
) == ASHIFTRT
)
661 /* Ensure that OP_MODE is at least twice as wide as MODE
662 to avoid the possibility that an outer LSHIFTRT shifts by more
663 than the sign extension's sign_bit_copies and introduces zeros
664 into the high bits of the result. */
665 && 2 * precision
<= op_precision
666 && CONST_INT_P (XEXP (op
, 1))
667 && GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
668 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
669 && UINTVAL (XEXP (op
, 1)) < precision
)
670 return simplify_gen_binary (ASHIFTRT
, mode
,
671 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
673 /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
674 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
675 the outer subreg is effectively a truncation to the original mode. */
676 if ((GET_CODE (op
) == LSHIFTRT
677 || GET_CODE (op
) == ASHIFTRT
)
678 && CONST_INT_P (XEXP (op
, 1))
679 && GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
680 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
681 && UINTVAL (XEXP (op
, 1)) < precision
)
682 return simplify_gen_binary (LSHIFTRT
, mode
,
683 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
685 /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
686 to (ashift:QI (x:QI) C), where C is a suitable small constant and
687 the outer subreg is effectively a truncation to the original mode. */
688 if (GET_CODE (op
) == ASHIFT
689 && CONST_INT_P (XEXP (op
, 1))
690 && (GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
691 || GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
)
692 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
693 && UINTVAL (XEXP (op
, 1)) < precision
)
694 return simplify_gen_binary (ASHIFT
, mode
,
695 XEXP (XEXP (op
, 0), 0), XEXP (op
, 1));
697 /* Recognize a word extraction from a multi-word subreg. */
698 if ((GET_CODE (op
) == LSHIFTRT
699 || GET_CODE (op
) == ASHIFTRT
)
700 && SCALAR_INT_MODE_P (mode
)
701 && SCALAR_INT_MODE_P (op_mode
)
702 && precision
>= BITS_PER_WORD
703 && 2 * precision
<= op_precision
704 && CONST_INT_P (XEXP (op
, 1))
705 && (INTVAL (XEXP (op
, 1)) & (precision
- 1)) == 0
706 && UINTVAL (XEXP (op
, 1)) < op_precision
)
708 int byte
= subreg_lowpart_offset (mode
, op_mode
);
709 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
710 return simplify_gen_subreg (mode
, XEXP (op
, 0), op_mode
,
712 ? byte
- shifted_bytes
713 : byte
+ shifted_bytes
));
716 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
717 and try replacing the TRUNCATE and shift with it. Don't do this
718 if the MEM has a mode-dependent address. */
719 if ((GET_CODE (op
) == LSHIFTRT
720 || GET_CODE (op
) == ASHIFTRT
)
721 && SCALAR_INT_MODE_P (op_mode
)
722 && MEM_P (XEXP (op
, 0))
723 && CONST_INT_P (XEXP (op
, 1))
724 && (INTVAL (XEXP (op
, 1)) % GET_MODE_BITSIZE (mode
)) == 0
725 && INTVAL (XEXP (op
, 1)) > 0
726 && INTVAL (XEXP (op
, 1)) < GET_MODE_BITSIZE (op_mode
)
727 && ! mode_dependent_address_p (XEXP (XEXP (op
, 0), 0),
728 MEM_ADDR_SPACE (XEXP (op
, 0)))
729 && ! MEM_VOLATILE_P (XEXP (op
, 0))
730 && (GET_MODE_SIZE (mode
) >= UNITS_PER_WORD
731 || WORDS_BIG_ENDIAN
== BYTES_BIG_ENDIAN
))
733 int byte
= subreg_lowpart_offset (mode
, op_mode
);
734 int shifted_bytes
= INTVAL (XEXP (op
, 1)) / BITS_PER_UNIT
;
735 return adjust_address_nv (XEXP (op
, 0), mode
,
737 ? byte
- shifted_bytes
738 : byte
+ shifted_bytes
));
741 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
742 (OP:SI foo:SI) if OP is NEG or ABS. */
743 if ((GET_CODE (op
) == ABS
744 || GET_CODE (op
) == NEG
)
745 && (GET_CODE (XEXP (op
, 0)) == SIGN_EXTEND
746 || GET_CODE (XEXP (op
, 0)) == ZERO_EXTEND
)
747 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
748 return simplify_gen_unary (GET_CODE (op
), mode
,
749 XEXP (XEXP (op
, 0), 0), mode
);
751 /* (truncate:A (subreg:B (truncate:C X) 0)) is
753 if (GET_CODE (op
) == SUBREG
754 && SCALAR_INT_MODE_P (mode
)
755 && SCALAR_INT_MODE_P (op_mode
)
756 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op
)))
757 && GET_CODE (SUBREG_REG (op
)) == TRUNCATE
758 && subreg_lowpart_p (op
))
759 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (SUBREG_REG (op
), 0),
760 GET_MODE (XEXP (SUBREG_REG (op
), 0)));
762 /* (truncate:A (truncate:B X)) is (truncate:A X). */
763 if (GET_CODE (op
) == TRUNCATE
)
764 return simplify_gen_unary (TRUNCATE
, mode
, XEXP (op
, 0),
765 GET_MODE (XEXP (op
, 0)));
770 /* Try to simplify a unary operation CODE whose output mode is to be
771 MODE with input operand OP whose mode was originally OP_MODE.
772 Return zero if no simplification can be made. */
774 simplify_unary_operation (enum rtx_code code
, enum machine_mode mode
,
775 rtx op
, enum machine_mode op_mode
)
779 trueop
= avoid_constant_pool_reference (op
);
781 tem
= simplify_const_unary_operation (code
, mode
, trueop
, op_mode
);
785 return simplify_unary_operation_1 (code
, mode
, op
);
788 /* Perform some simplifications we can do even if the operands
791 simplify_unary_operation_1 (enum rtx_code code
, enum machine_mode mode
, rtx op
)
793 enum rtx_code reversed
;
799 /* (not (not X)) == X. */
800 if (GET_CODE (op
) == NOT
)
803 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
804 comparison is all ones. */
805 if (COMPARISON_P (op
)
806 && (mode
== BImode
|| STORE_FLAG_VALUE
== -1)
807 && ((reversed
= reversed_comparison_code (op
, NULL_RTX
)) != UNKNOWN
))
808 return simplify_gen_relational (reversed
, mode
, VOIDmode
,
809 XEXP (op
, 0), XEXP (op
, 1));
811 /* (not (plus X -1)) can become (neg X). */
812 if (GET_CODE (op
) == PLUS
813 && XEXP (op
, 1) == constm1_rtx
)
814 return simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
816 /* Similarly, (not (neg X)) is (plus X -1). */
817 if (GET_CODE (op
) == NEG
)
818 return plus_constant (mode
, XEXP (op
, 0), -1);
820 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
821 if (GET_CODE (op
) == XOR
822 && CONST_INT_P (XEXP (op
, 1))
823 && (temp
= simplify_unary_operation (NOT
, mode
,
824 XEXP (op
, 1), mode
)) != 0)
825 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
827 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
828 if (GET_CODE (op
) == PLUS
829 && CONST_INT_P (XEXP (op
, 1))
830 && mode_signbit_p (mode
, XEXP (op
, 1))
831 && (temp
= simplify_unary_operation (NOT
, mode
,
832 XEXP (op
, 1), mode
)) != 0)
833 return simplify_gen_binary (XOR
, mode
, XEXP (op
, 0), temp
);
836 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
837 operands other than 1, but that is not valid. We could do a
838 similar simplification for (not (lshiftrt C X)) where C is
839 just the sign bit, but this doesn't seem common enough to
841 if (GET_CODE (op
) == ASHIFT
842 && XEXP (op
, 0) == const1_rtx
)
844 temp
= simplify_gen_unary (NOT
, mode
, const1_rtx
, mode
);
845 return simplify_gen_binary (ROTATE
, mode
, temp
, XEXP (op
, 1));
848 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
849 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
850 so we can perform the above simplification. */
852 if (STORE_FLAG_VALUE
== -1
853 && GET_CODE (op
) == ASHIFTRT
854 && GET_CODE (XEXP (op
, 1))
855 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
856 return simplify_gen_relational (GE
, mode
, VOIDmode
,
857 XEXP (op
, 0), const0_rtx
);
860 if (GET_CODE (op
) == SUBREG
861 && subreg_lowpart_p (op
)
862 && (GET_MODE_SIZE (GET_MODE (op
))
863 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op
))))
864 && GET_CODE (SUBREG_REG (op
)) == ASHIFT
865 && XEXP (SUBREG_REG (op
), 0) == const1_rtx
)
867 enum machine_mode inner_mode
= GET_MODE (SUBREG_REG (op
));
870 x
= gen_rtx_ROTATE (inner_mode
,
871 simplify_gen_unary (NOT
, inner_mode
, const1_rtx
,
873 XEXP (SUBREG_REG (op
), 1));
874 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, x
);
879 /* Apply De Morgan's laws to reduce number of patterns for machines
880 with negating logical insns (and-not, nand, etc.). If result has
881 only one NOT, put it first, since that is how the patterns are
884 if (GET_CODE (op
) == IOR
|| GET_CODE (op
) == AND
)
886 rtx in1
= XEXP (op
, 0), in2
= XEXP (op
, 1);
887 enum machine_mode op_mode
;
889 op_mode
= GET_MODE (in1
);
890 in1
= simplify_gen_unary (NOT
, op_mode
, in1
, op_mode
);
892 op_mode
= GET_MODE (in2
);
893 if (op_mode
== VOIDmode
)
895 in2
= simplify_gen_unary (NOT
, op_mode
, in2
, op_mode
);
897 if (GET_CODE (in2
) == NOT
&& GET_CODE (in1
) != NOT
)
900 in2
= in1
; in1
= tem
;
903 return gen_rtx_fmt_ee (GET_CODE (op
) == IOR
? AND
: IOR
,
909 /* (neg (neg X)) == X. */
910 if (GET_CODE (op
) == NEG
)
913 /* (neg (plus X 1)) can become (not X). */
914 if (GET_CODE (op
) == PLUS
915 && XEXP (op
, 1) == const1_rtx
)
916 return simplify_gen_unary (NOT
, mode
, XEXP (op
, 0), mode
);
918 /* Similarly, (neg (not X)) is (plus X 1). */
919 if (GET_CODE (op
) == NOT
)
920 return plus_constant (mode
, XEXP (op
, 0), 1);
922 /* (neg (minus X Y)) can become (minus Y X). This transformation
923 isn't safe for modes with signed zeros, since if X and Y are
924 both +0, (minus Y X) is the same as (minus X Y). If the
925 rounding mode is towards +infinity (or -infinity) then the two
926 expressions will be rounded differently. */
927 if (GET_CODE (op
) == MINUS
928 && !HONOR_SIGNED_ZEROS (mode
)
929 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
930 return simplify_gen_binary (MINUS
, mode
, XEXP (op
, 1), XEXP (op
, 0));
932 if (GET_CODE (op
) == PLUS
933 && !HONOR_SIGNED_ZEROS (mode
)
934 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
936 /* (neg (plus A C)) is simplified to (minus -C A). */
937 if (CONST_SCALAR_INT_P (XEXP (op
, 1))
938 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op
, 1)))
940 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 1), mode
);
942 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 0));
945 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
946 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 0), mode
);
947 return simplify_gen_binary (MINUS
, mode
, temp
, XEXP (op
, 1));
950 /* (neg (mult A B)) becomes (mult A (neg B)).
951 This works even for floating-point values. */
952 if (GET_CODE (op
) == MULT
953 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
955 temp
= simplify_gen_unary (NEG
, mode
, XEXP (op
, 1), mode
);
956 return simplify_gen_binary (MULT
, mode
, XEXP (op
, 0), temp
);
959 /* NEG commutes with ASHIFT since it is multiplication. Only do
960 this if we can then eliminate the NEG (e.g., if the operand
962 if (GET_CODE (op
) == ASHIFT
)
964 temp
= simplify_unary_operation (NEG
, mode
, XEXP (op
, 0), mode
);
966 return simplify_gen_binary (ASHIFT
, mode
, temp
, XEXP (op
, 1));
969 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
970 C is equal to the width of MODE minus 1. */
971 if (GET_CODE (op
) == ASHIFTRT
972 && CONST_INT_P (XEXP (op
, 1))
973 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
974 return simplify_gen_binary (LSHIFTRT
, mode
,
975 XEXP (op
, 0), XEXP (op
, 1));
977 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
978 C is equal to the width of MODE minus 1. */
979 if (GET_CODE (op
) == LSHIFTRT
980 && CONST_INT_P (XEXP (op
, 1))
981 && INTVAL (XEXP (op
, 1)) == GET_MODE_PRECISION (mode
) - 1)
982 return simplify_gen_binary (ASHIFTRT
, mode
,
983 XEXP (op
, 0), XEXP (op
, 1));
985 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
986 if (GET_CODE (op
) == XOR
987 && XEXP (op
, 1) == const1_rtx
988 && nonzero_bits (XEXP (op
, 0), mode
) == 1)
989 return plus_constant (mode
, XEXP (op
, 0), -1);
991 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
992 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
993 if (GET_CODE (op
) == LT
994 && XEXP (op
, 1) == const0_rtx
995 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op
, 0))))
997 enum machine_mode inner
= GET_MODE (XEXP (op
, 0));
998 int isize
= GET_MODE_PRECISION (inner
);
999 if (STORE_FLAG_VALUE
== 1)
1001 temp
= simplify_gen_binary (ASHIFTRT
, inner
, XEXP (op
, 0),
1002 GEN_INT (isize
- 1));
1005 if (GET_MODE_PRECISION (mode
) > isize
)
1006 return simplify_gen_unary (SIGN_EXTEND
, mode
, temp
, inner
);
1007 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1009 else if (STORE_FLAG_VALUE
== -1)
1011 temp
= simplify_gen_binary (LSHIFTRT
, inner
, XEXP (op
, 0),
1012 GEN_INT (isize
- 1));
1015 if (GET_MODE_PRECISION (mode
) > isize
)
1016 return simplify_gen_unary (ZERO_EXTEND
, mode
, temp
, inner
);
1017 return simplify_gen_unary (TRUNCATE
, mode
, temp
, inner
);
1023 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1024 with the umulXi3_highpart patterns. */
1025 if (GET_CODE (op
) == LSHIFTRT
1026 && GET_CODE (XEXP (op
, 0)) == MULT
)
1029 if (GET_MODE_CLASS (mode
) == MODE_PARTIAL_INT
)
1031 if (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
)))
1033 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1037 /* We can't handle truncation to a partial integer mode here
1038 because we don't know the real bitsize of the partial
1043 if (GET_MODE (op
) != VOIDmode
)
1045 temp
= simplify_truncation (mode
, op
, GET_MODE (op
));
1050 /* If we know that the value is already truncated, we can
1051 replace the TRUNCATE with a SUBREG. */
1052 if (GET_MODE_NUNITS (mode
) == 1
1053 && (TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (op
))
1054 || truncated_to_mode (mode
, op
)))
1056 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1061 /* A truncate of a comparison can be replaced with a subreg if
1062 STORE_FLAG_VALUE permits. This is like the previous test,
1063 but it works even if the comparison is done in a mode larger
1064 than HOST_BITS_PER_WIDE_INT. */
1065 if (HWI_COMPUTABLE_MODE_P (mode
)
1066 && COMPARISON_P (op
)
1067 && (STORE_FLAG_VALUE
& ~GET_MODE_MASK (mode
)) == 0)
1069 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1074 /* A truncate of a memory is just loading the low part of the memory
1075 if we are not changing the meaning of the address. */
1076 if (GET_CODE (op
) == MEM
1077 && !VECTOR_MODE_P (mode
)
1078 && !MEM_VOLATILE_P (op
)
1079 && !mode_dependent_address_p (XEXP (op
, 0), MEM_ADDR_SPACE (op
)))
1081 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1088 case FLOAT_TRUNCATE
:
1089 if (DECIMAL_FLOAT_MODE_P (mode
))
1092 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1093 if (GET_CODE (op
) == FLOAT_EXTEND
1094 && GET_MODE (XEXP (op
, 0)) == mode
)
1095 return XEXP (op
, 0);
1097 /* (float_truncate:SF (float_truncate:DF foo:XF))
1098 = (float_truncate:SF foo:XF).
1099 This may eliminate double rounding, so it is unsafe.
1101 (float_truncate:SF (float_extend:XF foo:DF))
1102 = (float_truncate:SF foo:DF).
1104 (float_truncate:DF (float_extend:XF foo:SF))
1105 = (float_extend:SF foo:DF). */
1106 if ((GET_CODE (op
) == FLOAT_TRUNCATE
1107 && flag_unsafe_math_optimizations
)
1108 || GET_CODE (op
) == FLOAT_EXTEND
)
1109 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op
,
1111 > GET_MODE_SIZE (mode
)
1112 ? FLOAT_TRUNCATE
: FLOAT_EXTEND
,
1114 XEXP (op
, 0), mode
);
1116 /* (float_truncate (float x)) is (float x) */
1117 if (GET_CODE (op
) == FLOAT
1118 && (flag_unsafe_math_optimizations
1119 || (SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1120 && ((unsigned)significand_size (GET_MODE (op
))
1121 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1122 - num_sign_bit_copies (XEXP (op
, 0),
1123 GET_MODE (XEXP (op
, 0))))))))
1124 return simplify_gen_unary (FLOAT
, mode
,
1126 GET_MODE (XEXP (op
, 0)));
1128 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1129 (OP:SF foo:SF) if OP is NEG or ABS. */
1130 if ((GET_CODE (op
) == ABS
1131 || GET_CODE (op
) == NEG
)
1132 && GET_CODE (XEXP (op
, 0)) == FLOAT_EXTEND
1133 && GET_MODE (XEXP (XEXP (op
, 0), 0)) == mode
)
1134 return simplify_gen_unary (GET_CODE (op
), mode
,
1135 XEXP (XEXP (op
, 0), 0), mode
);
1137 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1138 is (float_truncate:SF x). */
1139 if (GET_CODE (op
) == SUBREG
1140 && subreg_lowpart_p (op
)
1141 && GET_CODE (SUBREG_REG (op
)) == FLOAT_TRUNCATE
)
1142 return SUBREG_REG (op
);
1146 if (DECIMAL_FLOAT_MODE_P (mode
))
1149 /* (float_extend (float_extend x)) is (float_extend x)
1151 (float_extend (float x)) is (float x) assuming that double
1152 rounding can't happen.
1154 if (GET_CODE (op
) == FLOAT_EXTEND
1155 || (GET_CODE (op
) == FLOAT
1156 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1157 && ((unsigned)significand_size (GET_MODE (op
))
1158 >= (GET_MODE_PRECISION (GET_MODE (XEXP (op
, 0)))
1159 - num_sign_bit_copies (XEXP (op
, 0),
1160 GET_MODE (XEXP (op
, 0)))))))
1161 return simplify_gen_unary (GET_CODE (op
), mode
,
1163 GET_MODE (XEXP (op
, 0)));
1168 /* (abs (neg <foo>)) -> (abs <foo>) */
1169 if (GET_CODE (op
) == NEG
)
1170 return simplify_gen_unary (ABS
, mode
, XEXP (op
, 0),
1171 GET_MODE (XEXP (op
, 0)));
1173 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1175 if (GET_MODE (op
) == VOIDmode
)
1178 /* If operand is something known to be positive, ignore the ABS. */
1179 if (GET_CODE (op
) == FFS
|| GET_CODE (op
) == ABS
1180 || val_signbit_known_clear_p (GET_MODE (op
),
1181 nonzero_bits (op
, GET_MODE (op
))))
1184 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1185 if (num_sign_bit_copies (op
, mode
) == GET_MODE_PRECISION (mode
))
1186 return gen_rtx_NEG (mode
, op
);
1191 /* (ffs (*_extend <X>)) = (ffs <X>) */
1192 if (GET_CODE (op
) == SIGN_EXTEND
1193 || GET_CODE (op
) == ZERO_EXTEND
)
1194 return simplify_gen_unary (FFS
, mode
, XEXP (op
, 0),
1195 GET_MODE (XEXP (op
, 0)));
1199 switch (GET_CODE (op
))
1203 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1204 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1205 GET_MODE (XEXP (op
, 0)));
1209 /* Rotations don't affect popcount. */
1210 if (!side_effects_p (XEXP (op
, 1)))
1211 return simplify_gen_unary (POPCOUNT
, mode
, XEXP (op
, 0),
1212 GET_MODE (XEXP (op
, 0)));
1221 switch (GET_CODE (op
))
1227 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1228 GET_MODE (XEXP (op
, 0)));
1232 /* Rotations don't affect parity. */
1233 if (!side_effects_p (XEXP (op
, 1)))
1234 return simplify_gen_unary (PARITY
, mode
, XEXP (op
, 0),
1235 GET_MODE (XEXP (op
, 0)));
1244 /* (bswap (bswap x)) -> x. */
1245 if (GET_CODE (op
) == BSWAP
)
1246 return XEXP (op
, 0);
1250 /* (float (sign_extend <X>)) = (float <X>). */
1251 if (GET_CODE (op
) == SIGN_EXTEND
)
1252 return simplify_gen_unary (FLOAT
, mode
, XEXP (op
, 0),
1253 GET_MODE (XEXP (op
, 0)));
1257 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1258 becomes just the MINUS if its mode is MODE. This allows
1259 folding switch statements on machines using casesi (such as
1261 if (GET_CODE (op
) == TRUNCATE
1262 && GET_MODE (XEXP (op
, 0)) == mode
1263 && GET_CODE (XEXP (op
, 0)) == MINUS
1264 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
1265 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
1266 return XEXP (op
, 0);
1268 /* Extending a widening multiplication should be canonicalized to
1269 a wider widening multiplication. */
1270 if (GET_CODE (op
) == MULT
)
1272 rtx lhs
= XEXP (op
, 0);
1273 rtx rhs
= XEXP (op
, 1);
1274 enum rtx_code lcode
= GET_CODE (lhs
);
1275 enum rtx_code rcode
= GET_CODE (rhs
);
1277 /* Widening multiplies usually extend both operands, but sometimes
1278 they use a shift to extract a portion of a register. */
1279 if ((lcode
== SIGN_EXTEND
1280 || (lcode
== ASHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1281 && (rcode
== SIGN_EXTEND
1282 || (rcode
== ASHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1284 enum machine_mode lmode
= GET_MODE (lhs
);
1285 enum machine_mode rmode
= GET_MODE (rhs
);
1288 if (lcode
== ASHIFTRT
)
1289 /* Number of bits not shifted off the end. */
1290 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1291 else /* lcode == SIGN_EXTEND */
1292 /* Size of inner mode. */
1293 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1295 if (rcode
== ASHIFTRT
)
1296 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1297 else /* rcode == SIGN_EXTEND */
1298 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1300 /* We can only widen multiplies if the result is mathematiclly
1301 equivalent. I.e. if overflow was impossible. */
1302 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1303 return simplify_gen_binary
1305 simplify_gen_unary (SIGN_EXTEND
, mode
, lhs
, lmode
),
1306 simplify_gen_unary (SIGN_EXTEND
, mode
, rhs
, rmode
));
1310 /* Check for a sign extension of a subreg of a promoted
1311 variable, where the promotion is sign-extended, and the
1312 target mode is the same as the variable's promotion. */
1313 if (GET_CODE (op
) == SUBREG
1314 && SUBREG_PROMOTED_VAR_P (op
)
1315 && ! SUBREG_PROMOTED_UNSIGNED_P (op
)
1316 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1318 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1323 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1324 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1325 if (GET_CODE (op
) == SIGN_EXTEND
|| GET_CODE (op
) == ZERO_EXTEND
)
1327 gcc_assert (GET_MODE_BITSIZE (mode
)
1328 > GET_MODE_BITSIZE (GET_MODE (op
)));
1329 return simplify_gen_unary (GET_CODE (op
), mode
, XEXP (op
, 0),
1330 GET_MODE (XEXP (op
, 0)));
1333 /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1334 is (sign_extend:M (subreg:O <X>)) if there is mode with
1335 GET_MODE_BITSIZE (N) - I bits.
1336 (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1337 is similarly (zero_extend:M (subreg:O <X>)). */
1338 if ((GET_CODE (op
) == ASHIFTRT
|| GET_CODE (op
) == LSHIFTRT
)
1339 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1340 && CONST_INT_P (XEXP (op
, 1))
1341 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1342 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1344 enum machine_mode tmode
1345 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1346 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1347 gcc_assert (GET_MODE_BITSIZE (mode
)
1348 > GET_MODE_BITSIZE (GET_MODE (op
)));
1349 if (tmode
!= BLKmode
)
1352 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1354 return simplify_gen_unary (GET_CODE (op
) == ASHIFTRT
1355 ? SIGN_EXTEND
: ZERO_EXTEND
,
1356 mode
, inner
, tmode
);
1360 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1361 /* As we do not know which address space the pointer is referring to,
1362 we can do this only if the target does not support different pointer
1363 or address modes depending on the address space. */
1364 if (target_default_pointer_address_modes_p ()
1365 && ! POINTERS_EXTEND_UNSIGNED
1366 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1368 || (GET_CODE (op
) == SUBREG
1369 && REG_P (SUBREG_REG (op
))
1370 && REG_POINTER (SUBREG_REG (op
))
1371 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1372 return convert_memory_address (Pmode
, op
);
1377 /* Check for a zero extension of a subreg of a promoted
1378 variable, where the promotion is zero-extended, and the
1379 target mode is the same as the variable's promotion. */
1380 if (GET_CODE (op
) == SUBREG
1381 && SUBREG_PROMOTED_VAR_P (op
)
1382 && SUBREG_PROMOTED_UNSIGNED_P (op
) > 0
1383 && GET_MODE_SIZE (mode
) <= GET_MODE_SIZE (GET_MODE (XEXP (op
, 0))))
1385 temp
= rtl_hooks
.gen_lowpart_no_emit (mode
, op
);
1390 /* Extending a widening multiplication should be canonicalized to
1391 a wider widening multiplication. */
1392 if (GET_CODE (op
) == MULT
)
1394 rtx lhs
= XEXP (op
, 0);
1395 rtx rhs
= XEXP (op
, 1);
1396 enum rtx_code lcode
= GET_CODE (lhs
);
1397 enum rtx_code rcode
= GET_CODE (rhs
);
1399 /* Widening multiplies usually extend both operands, but sometimes
1400 they use a shift to extract a portion of a register. */
1401 if ((lcode
== ZERO_EXTEND
1402 || (lcode
== LSHIFTRT
&& CONST_INT_P (XEXP (lhs
, 1))))
1403 && (rcode
== ZERO_EXTEND
1404 || (rcode
== LSHIFTRT
&& CONST_INT_P (XEXP (rhs
, 1)))))
1406 enum machine_mode lmode
= GET_MODE (lhs
);
1407 enum machine_mode rmode
= GET_MODE (rhs
);
1410 if (lcode
== LSHIFTRT
)
1411 /* Number of bits not shifted off the end. */
1412 bits
= GET_MODE_PRECISION (lmode
) - INTVAL (XEXP (lhs
, 1));
1413 else /* lcode == ZERO_EXTEND */
1414 /* Size of inner mode. */
1415 bits
= GET_MODE_PRECISION (GET_MODE (XEXP (lhs
, 0)));
1417 if (rcode
== LSHIFTRT
)
1418 bits
+= GET_MODE_PRECISION (rmode
) - INTVAL (XEXP (rhs
, 1));
1419 else /* rcode == ZERO_EXTEND */
1420 bits
+= GET_MODE_PRECISION (GET_MODE (XEXP (rhs
, 0)));
1422 /* We can only widen multiplies if the result is mathematiclly
1423 equivalent. I.e. if overflow was impossible. */
1424 if (bits
<= GET_MODE_PRECISION (GET_MODE (op
)))
1425 return simplify_gen_binary
1427 simplify_gen_unary (ZERO_EXTEND
, mode
, lhs
, lmode
),
1428 simplify_gen_unary (ZERO_EXTEND
, mode
, rhs
, rmode
));
1432 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1433 if (GET_CODE (op
) == ZERO_EXTEND
)
1434 return simplify_gen_unary (ZERO_EXTEND
, mode
, XEXP (op
, 0),
1435 GET_MODE (XEXP (op
, 0)));
1437 /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1438 is (zero_extend:M (subreg:O <X>)) if there is mode with
1439 GET_MODE_BITSIZE (N) - I bits. */
1440 if (GET_CODE (op
) == LSHIFTRT
1441 && GET_CODE (XEXP (op
, 0)) == ASHIFT
1442 && CONST_INT_P (XEXP (op
, 1))
1443 && XEXP (XEXP (op
, 0), 1) == XEXP (op
, 1)
1444 && GET_MODE_BITSIZE (GET_MODE (op
)) > INTVAL (XEXP (op
, 1)))
1446 enum machine_mode tmode
1447 = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op
))
1448 - INTVAL (XEXP (op
, 1)), MODE_INT
, 1);
1449 if (tmode
!= BLKmode
)
1452 rtl_hooks
.gen_lowpart_no_emit (tmode
, XEXP (XEXP (op
, 0), 0));
1454 return simplify_gen_unary (ZERO_EXTEND
, mode
, inner
, tmode
);
1458 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1459 /* As we do not know which address space the pointer is referring to,
1460 we can do this only if the target does not support different pointer
1461 or address modes depending on the address space. */
1462 if (target_default_pointer_address_modes_p ()
1463 && POINTERS_EXTEND_UNSIGNED
> 0
1464 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
1466 || (GET_CODE (op
) == SUBREG
1467 && REG_P (SUBREG_REG (op
))
1468 && REG_POINTER (SUBREG_REG (op
))
1469 && GET_MODE (SUBREG_REG (op
)) == Pmode
)))
1470 return convert_memory_address (Pmode
, op
);
1481 /* Try to compute the value of a unary operation CODE whose output mode is to
1482 be MODE with input operand OP whose mode was originally OP_MODE.
1483 Return zero if the value cannot be computed. */
1485 simplify_const_unary_operation (enum rtx_code code
, enum machine_mode mode
,
1486 rtx op
, enum machine_mode op_mode
)
1488 unsigned int width
= GET_MODE_PRECISION (mode
);
1489 unsigned int op_width
= GET_MODE_PRECISION (op_mode
);
1491 if (code
== VEC_DUPLICATE
)
1493 gcc_assert (VECTOR_MODE_P (mode
));
1494 if (GET_MODE (op
) != VOIDmode
)
1496 if (!VECTOR_MODE_P (GET_MODE (op
)))
1497 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE (op
));
1499 gcc_assert (GET_MODE_INNER (mode
) == GET_MODE_INNER
1502 if (CONST_SCALAR_INT_P (op
) || CONST_DOUBLE_AS_FLOAT_P (op
)
1503 || GET_CODE (op
) == CONST_VECTOR
)
1505 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1506 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1507 rtvec v
= rtvec_alloc (n_elts
);
1510 if (GET_CODE (op
) != CONST_VECTOR
)
1511 for (i
= 0; i
< n_elts
; i
++)
1512 RTVEC_ELT (v
, i
) = op
;
1515 enum machine_mode inmode
= GET_MODE (op
);
1516 int in_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (inmode
));
1517 unsigned in_n_elts
= (GET_MODE_SIZE (inmode
) / in_elt_size
);
1519 gcc_assert (in_n_elts
< n_elts
);
1520 gcc_assert ((n_elts
% in_n_elts
) == 0);
1521 for (i
= 0; i
< n_elts
; i
++)
1522 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (op
, i
% in_n_elts
);
1524 return gen_rtx_CONST_VECTOR (mode
, v
);
1528 if (VECTOR_MODE_P (mode
) && GET_CODE (op
) == CONST_VECTOR
)
1530 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
1531 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
1532 enum machine_mode opmode
= GET_MODE (op
);
1533 int op_elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
1534 unsigned op_n_elts
= (GET_MODE_SIZE (opmode
) / op_elt_size
);
1535 rtvec v
= rtvec_alloc (n_elts
);
1538 gcc_assert (op_n_elts
== n_elts
);
1539 for (i
= 0; i
< n_elts
; i
++)
1541 rtx x
= simplify_unary_operation (code
, GET_MODE_INNER (mode
),
1542 CONST_VECTOR_ELT (op
, i
),
1543 GET_MODE_INNER (opmode
));
1546 RTVEC_ELT (v
, i
) = x
;
1548 return gen_rtx_CONST_VECTOR (mode
, v
);
1551 /* The order of these tests is critical so that, for example, we don't
1552 check the wrong mode (input vs. output) for a conversion operation,
1553 such as FIX. At some point, this should be simplified. */
1555 if (code
== FLOAT
&& CONST_SCALAR_INT_P (op
))
1557 HOST_WIDE_INT hv
, lv
;
1560 if (CONST_INT_P (op
))
1561 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1563 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1565 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
1566 d
= real_value_truncate (mode
, d
);
1567 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1569 else if (code
== UNSIGNED_FLOAT
&& CONST_SCALAR_INT_P (op
))
1571 HOST_WIDE_INT hv
, lv
;
1574 if (CONST_INT_P (op
))
1575 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
1577 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
1579 if (op_mode
== VOIDmode
1580 || GET_MODE_PRECISION (op_mode
) > HOST_BITS_PER_DOUBLE_INT
)
1581 /* We should never get a negative number. */
1582 gcc_assert (hv
>= 0);
1583 else if (GET_MODE_PRECISION (op_mode
) <= HOST_BITS_PER_WIDE_INT
)
1584 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
1586 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
1587 d
= real_value_truncate (mode
, d
);
1588 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1591 if (CONST_INT_P (op
)
1592 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
1594 HOST_WIDE_INT arg0
= INTVAL (op
);
1608 val
= (arg0
>= 0 ? arg0
: - arg0
);
1612 arg0
&= GET_MODE_MASK (mode
);
1613 val
= ffs_hwi (arg0
);
1617 arg0
&= GET_MODE_MASK (mode
);
1618 if (arg0
== 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1621 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 1;
1625 arg0
&= GET_MODE_MASK (mode
);
1627 val
= GET_MODE_PRECISION (mode
) - 1;
1629 val
= GET_MODE_PRECISION (mode
) - floor_log2 (arg0
) - 2;
1631 val
= GET_MODE_PRECISION (mode
) - floor_log2 (~arg0
) - 2;
1635 arg0
&= GET_MODE_MASK (mode
);
1638 /* Even if the value at zero is undefined, we have to come
1639 up with some replacement. Seems good enough. */
1640 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
))
1641 val
= GET_MODE_PRECISION (mode
);
1644 val
= ctz_hwi (arg0
);
1648 arg0
&= GET_MODE_MASK (mode
);
1651 val
++, arg0
&= arg0
- 1;
1655 arg0
&= GET_MODE_MASK (mode
);
1658 val
++, arg0
&= arg0
- 1;
1667 for (s
= 0; s
< width
; s
+= 8)
1669 unsigned int d
= width
- s
- 8;
1670 unsigned HOST_WIDE_INT byte
;
1671 byte
= (arg0
>> s
) & 0xff;
1682 /* When zero-extending a CONST_INT, we need to know its
1684 gcc_assert (op_mode
!= VOIDmode
);
1685 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1687 /* If we were really extending the mode,
1688 we would have to distinguish between zero-extension
1689 and sign-extension. */
1690 gcc_assert (width
== op_width
);
1693 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
1694 val
= arg0
& GET_MODE_MASK (op_mode
);
1700 if (op_mode
== VOIDmode
)
1702 op_width
= GET_MODE_PRECISION (op_mode
);
1703 if (op_width
== HOST_BITS_PER_WIDE_INT
)
1705 /* If we were really extending the mode,
1706 we would have to distinguish between zero-extension
1707 and sign-extension. */
1708 gcc_assert (width
== op_width
);
1711 else if (op_width
< HOST_BITS_PER_WIDE_INT
)
1713 val
= arg0
& GET_MODE_MASK (op_mode
);
1714 if (val_signbit_known_set_p (op_mode
, val
))
1715 val
|= ~GET_MODE_MASK (op_mode
);
1723 case FLOAT_TRUNCATE
:
1735 return gen_int_mode (val
, mode
);
1738 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1739 for a DImode operation on a CONST_INT. */
1740 else if (width
<= HOST_BITS_PER_DOUBLE_INT
1741 && (CONST_DOUBLE_AS_INT_P (op
) || CONST_INT_P (op
)))
1743 double_int first
, value
;
1745 if (CONST_DOUBLE_AS_INT_P (op
))
1746 first
= double_int::from_pair (CONST_DOUBLE_HIGH (op
),
1747 CONST_DOUBLE_LOW (op
));
1749 first
= double_int::from_shwi (INTVAL (op
));
1762 if (first
.is_negative ())
1771 value
.low
= ffs_hwi (first
.low
);
1772 else if (first
.high
!= 0)
1773 value
.low
= HOST_BITS_PER_WIDE_INT
+ ffs_hwi (first
.high
);
1780 if (first
.high
!= 0)
1781 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.high
) - 1
1782 - HOST_BITS_PER_WIDE_INT
;
1783 else if (first
.low
!= 0)
1784 value
.low
= GET_MODE_PRECISION (mode
) - floor_log2 (first
.low
) - 1;
1785 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1786 value
.low
= GET_MODE_PRECISION (mode
);
1792 value
.low
= ctz_hwi (first
.low
);
1793 else if (first
.high
!= 0)
1794 value
.low
= HOST_BITS_PER_WIDE_INT
+ ctz_hwi (first
.high
);
1795 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode
, value
.low
))
1796 value
.low
= GET_MODE_PRECISION (mode
);
1800 value
= double_int_zero
;
1804 first
.low
&= first
.low
- 1;
1809 first
.high
&= first
.high
- 1;
1814 value
= double_int_zero
;
1818 first
.low
&= first
.low
- 1;
1823 first
.high
&= first
.high
- 1;
1832 value
= double_int_zero
;
1833 for (s
= 0; s
< width
; s
+= 8)
1835 unsigned int d
= width
- s
- 8;
1836 unsigned HOST_WIDE_INT byte
;
1838 if (s
< HOST_BITS_PER_WIDE_INT
)
1839 byte
= (first
.low
>> s
) & 0xff;
1841 byte
= (first
.high
>> (s
- HOST_BITS_PER_WIDE_INT
)) & 0xff;
1843 if (d
< HOST_BITS_PER_WIDE_INT
)
1844 value
.low
|= byte
<< d
;
1846 value
.high
|= byte
<< (d
- HOST_BITS_PER_WIDE_INT
);
1852 /* This is just a change-of-mode, so do nothing. */
1857 gcc_assert (op_mode
!= VOIDmode
);
1859 if (op_width
> HOST_BITS_PER_WIDE_INT
)
1862 value
= double_int::from_uhwi (first
.low
& GET_MODE_MASK (op_mode
));
1866 if (op_mode
== VOIDmode
1867 || op_width
> HOST_BITS_PER_WIDE_INT
)
1871 value
.low
= first
.low
& GET_MODE_MASK (op_mode
);
1872 if (val_signbit_known_set_p (op_mode
, value
.low
))
1873 value
.low
|= ~GET_MODE_MASK (op_mode
);
1875 value
.high
= HWI_SIGN_EXTEND (value
.low
);
1886 return immed_double_int_const (value
, mode
);
1889 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1890 && SCALAR_FLOAT_MODE_P (mode
)
1891 && SCALAR_FLOAT_MODE_P (GET_MODE (op
)))
1893 REAL_VALUE_TYPE d
, t
;
1894 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
1899 if (HONOR_SNANS (mode
) && real_isnan (&d
))
1901 real_sqrt (&t
, mode
, &d
);
1905 d
= real_value_abs (&d
);
1908 d
= real_value_negate (&d
);
1910 case FLOAT_TRUNCATE
:
1911 d
= real_value_truncate (mode
, d
);
1914 /* All this does is change the mode, unless changing
1916 if (GET_MODE_CLASS (mode
) != GET_MODE_CLASS (GET_MODE (op
)))
1917 real_convert (&d
, mode
, &d
);
1920 real_arithmetic (&d
, FIX_TRUNC_EXPR
, &d
, NULL
);
1927 real_to_target (tmp
, &d
, GET_MODE (op
));
1928 for (i
= 0; i
< 4; i
++)
1930 real_from_target (&d
, tmp
, mode
);
1936 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
1939 else if (CONST_DOUBLE_AS_FLOAT_P (op
)
1940 && SCALAR_FLOAT_MODE_P (GET_MODE (op
))
1941 && GET_MODE_CLASS (mode
) == MODE_INT
1942 && width
<= HOST_BITS_PER_DOUBLE_INT
&& width
> 0)
1944 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1945 operators are intentionally left unspecified (to ease implementation
1946 by target backends), for consistency, this routine implements the
1947 same semantics for constant folding as used by the middle-end. */
1949 /* This was formerly used only for non-IEEE float.
1950 eggert@twinsun.com says it is safe for IEEE also. */
1951 HOST_WIDE_INT xh
, xl
, th
, tl
;
1952 REAL_VALUE_TYPE x
, t
;
1953 REAL_VALUE_FROM_CONST_DOUBLE (x
, op
);
1957 if (REAL_VALUE_ISNAN (x
))
1960 /* Test against the signed upper bound. */
1961 if (width
> HOST_BITS_PER_WIDE_INT
)
1963 th
= ((unsigned HOST_WIDE_INT
) 1
1964 << (width
- HOST_BITS_PER_WIDE_INT
- 1)) - 1;
1970 tl
= ((unsigned HOST_WIDE_INT
) 1 << (width
- 1)) - 1;
1972 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1973 if (REAL_VALUES_LESS (t
, x
))
1980 /* Test against the signed lower bound. */
1981 if (width
> HOST_BITS_PER_WIDE_INT
)
1983 th
= (unsigned HOST_WIDE_INT
) (-1)
1984 << (width
- HOST_BITS_PER_WIDE_INT
- 1);
1990 tl
= (unsigned HOST_WIDE_INT
) (-1) << (width
- 1);
1992 real_from_integer (&t
, VOIDmode
, tl
, th
, 0);
1993 if (REAL_VALUES_LESS (x
, t
))
1999 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2003 if (REAL_VALUE_ISNAN (x
) || REAL_VALUE_NEGATIVE (x
))
2006 /* Test against the unsigned upper bound. */
2007 if (width
== HOST_BITS_PER_DOUBLE_INT
)
2012 else if (width
>= HOST_BITS_PER_WIDE_INT
)
2014 th
= ((unsigned HOST_WIDE_INT
) 1
2015 << (width
- HOST_BITS_PER_WIDE_INT
)) - 1;
2021 tl
= ((unsigned HOST_WIDE_INT
) 1 << width
) - 1;
2023 real_from_integer (&t
, VOIDmode
, tl
, th
, 1);
2024 if (REAL_VALUES_LESS (t
, x
))
2031 REAL_VALUE_TO_INT (&xl
, &xh
, x
);
2037 return immed_double_const (xl
, xh
, mode
);
2043 /* Subroutine of simplify_binary_operation to simplify a commutative,
2044 associative binary operation CODE with result mode MODE, operating
2045 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2046 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2047 canonicalization is possible. */
2050 simplify_associative_operation (enum rtx_code code
, enum machine_mode mode
,
2055 /* Linearize the operator to the left. */
2056 if (GET_CODE (op1
) == code
)
2058 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2059 if (GET_CODE (op0
) == code
)
2061 tem
= simplify_gen_binary (code
, mode
, op0
, XEXP (op1
, 0));
2062 return simplify_gen_binary (code
, mode
, tem
, XEXP (op1
, 1));
2065 /* "a op (b op c)" becomes "(b op c) op a". */
2066 if (! swap_commutative_operands_p (op1
, op0
))
2067 return simplify_gen_binary (code
, mode
, op1
, op0
);
2074 if (GET_CODE (op0
) == code
)
2076 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2077 if (swap_commutative_operands_p (XEXP (op0
, 1), op1
))
2079 tem
= simplify_gen_binary (code
, mode
, XEXP (op0
, 0), op1
);
2080 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2083 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2084 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 1), op1
);
2086 return simplify_gen_binary (code
, mode
, XEXP (op0
, 0), tem
);
2088 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2089 tem
= simplify_binary_operation (code
, mode
, XEXP (op0
, 0), op1
);
2091 return simplify_gen_binary (code
, mode
, tem
, XEXP (op0
, 1));
2098 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2099 and OP1. Return 0 if no simplification is possible.
2101 Don't use this for relational operations such as EQ or LT.
2102 Use simplify_relational_operation instead. */
2104 simplify_binary_operation (enum rtx_code code
, enum machine_mode mode
,
2107 rtx trueop0
, trueop1
;
2110 /* Relational operations don't work here. We must know the mode
2111 of the operands in order to do the comparison correctly.
2112 Assuming a full word can give incorrect results.
2113 Consider comparing 128 with -128 in QImode. */
2114 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMPARE
);
2115 gcc_assert (GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
);
2117 /* Make sure the constant is second. */
2118 if (GET_RTX_CLASS (code
) == RTX_COMM_ARITH
2119 && swap_commutative_operands_p (op0
, op1
))
2121 tem
= op0
, op0
= op1
, op1
= tem
;
2124 trueop0
= avoid_constant_pool_reference (op0
);
2125 trueop1
= avoid_constant_pool_reference (op1
);
2127 tem
= simplify_const_binary_operation (code
, mode
, trueop0
, trueop1
);
2130 return simplify_binary_operation_1 (code
, mode
, op0
, op1
, trueop0
, trueop1
);
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;
      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));
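
      /* For example, (plus:SI (symbol_ref:SI ("x")) (const_int 4)) is folded
         via plus_constant into a relocatable
         (const:SI (plus:SI (symbol_ref:SI ("x")) (const_int 4))), while
         CONST_INT plus CONST_INT is deliberately left to
         simplify_const_binary_operation, which reduces the sum to the mode.  */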
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = coeff0 + coeff1;
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : 0;
            }
        }
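
      /* Concretely, (plus (mult r 3) r) sees coefficients 3 and 1 for the
         same operand r and becomes (mult r 4), and (plus (ashift r 2) r)
         becomes (mult r 5).  The rewrite is kept only if its cost does not
         exceed that of the original PLUS.  */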
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
          && GET_CODE (op0) == XOR
          && CONST_SCALAR_INT_P (XEXP (op0, 1))
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }
      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
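
      /* On a target where STORE_FLAG_VALUE is 1, the comparison rule above
         maps (plus (eq a b) (const_int -1)) to (neg (ne a b)); both forms
         evaluate to 0 when a == b and to -1 otherwise.  */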
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;
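
      /* Hence (minus (const_int 0) x) becomes (neg x) when signed zeros need
         not be honored, and (minus (const_int -1) x) becomes (not x),
         matching the two's-complement identity -1 - x == ~x.  */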
2365 /* See if this is something like X * C - X or vice versa or
2366 if the multiplication is written as a shift. If so, we can
2367 distribute and make a new multiply, shift, or maybe just
2368 have X (if C is 2 in the example above). But don't make
2369 something more expensive than we had before. */
2371 if (SCALAR_INT_MODE_P (mode
))
2373 double_int coeff0
, negcoeff1
;
2374 rtx lhs
= op0
, rhs
= op1
;
2376 coeff0
= double_int_one
;
2377 negcoeff1
= double_int_minus_one
;
2379 if (GET_CODE (lhs
) == NEG
)
2381 coeff0
= double_int_minus_one
;
2382 lhs
= XEXP (lhs
, 0);
2384 else if (GET_CODE (lhs
) == MULT
2385 && CONST_INT_P (XEXP (lhs
, 1)))
2387 coeff0
= double_int::from_shwi (INTVAL (XEXP (lhs
, 1)));
2388 lhs
= XEXP (lhs
, 0);
2390 else if (GET_CODE (lhs
) == ASHIFT
2391 && CONST_INT_P (XEXP (lhs
, 1))
2392 && INTVAL (XEXP (lhs
, 1)) >= 0
2393 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2395 coeff0
= double_int_zero
.set_bit (INTVAL (XEXP (lhs
, 1)));
2396 lhs
= XEXP (lhs
, 0);
2399 if (GET_CODE (rhs
) == NEG
)
2401 negcoeff1
= double_int_one
;
2402 rhs
= XEXP (rhs
, 0);
2404 else if (GET_CODE (rhs
) == MULT
2405 && CONST_INT_P (XEXP (rhs
, 1)))
2407 negcoeff1
= double_int::from_shwi (-INTVAL (XEXP (rhs
, 1)));
2408 rhs
= XEXP (rhs
, 0);
2410 else if (GET_CODE (rhs
) == ASHIFT
2411 && CONST_INT_P (XEXP (rhs
, 1))
2412 && INTVAL (XEXP (rhs
, 1)) >= 0
2413 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
2415 negcoeff1
= double_int_zero
.set_bit (INTVAL (XEXP (rhs
, 1)));
2416 negcoeff1
= -negcoeff1
;
2417 rhs
= XEXP (rhs
, 0);
2420 if (rtx_equal_p (lhs
, rhs
))
2422 rtx orig
= gen_rtx_MINUS (mode
, op0
, op1
);
2425 bool speed
= optimize_function_for_speed_p (cfun
);
2427 val
= coeff0
+ negcoeff1
;
2428 coeff
= immed_double_int_const (val
, mode
);
2430 tem
= simplify_gen_binary (MULT
, mode
, lhs
, coeff
);
2431 return set_src_cost (tem
, speed
) <= set_src_cost (orig
, speed
)
2436 /* (a - (-b)) -> (a + b). True even for IEEE. */
2437 if (GET_CODE (op1
) == NEG
)
2438 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
2440 /* (-x - c) may be simplified as (-c - x). */
2441 if (GET_CODE (op0
) == NEG
2442 && (CONST_SCALAR_INT_P (op1
) || CONST_DOUBLE_AS_FLOAT_P (op1
)))
2444 tem
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2446 return simplify_gen_binary (MINUS
, mode
, tem
, XEXP (op0
, 0));
2449 /* Don't let a relocatable value get a negative coeff. */
2450 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
2451 return simplify_gen_binary (PLUS
, mode
,
2453 neg_const_int (mode
, op1
));
2455 /* (x - (x & y)) -> (x & ~y) */
2456 if (INTEGRAL_MODE_P (mode
) && GET_CODE (op1
) == AND
)
2458 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
2460 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 1),
2461 GET_MODE (XEXP (op1
, 1)));
2462 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2464 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
2466 tem
= simplify_gen_unary (NOT
, mode
, XEXP (op1
, 0),
2467 GET_MODE (XEXP (op1
, 0)));
2468 return simplify_gen_binary (AND
, mode
, op0
, tem
);
2472 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2473 by reversing the comparison code if valid. */
2474 if (STORE_FLAG_VALUE
== 1
2475 && trueop0
== const1_rtx
2476 && COMPARISON_P (op1
)
2477 && (reversed
= reversed_comparison (op1
, mode
)))
2480 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2481 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2482 && GET_CODE (op1
) == MULT
2483 && GET_CODE (XEXP (op1
, 0)) == NEG
)
2487 in1
= XEXP (XEXP (op1
, 0), 0);
2488 in2
= XEXP (op1
, 1);
2489 return simplify_gen_binary (PLUS
, mode
,
2490 simplify_gen_binary (MULT
, mode
,
2495 /* Canonicalize (minus (neg A) (mult B C)) to
2496 (minus (mult (neg B) C) A). */
2497 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode
)
2498 && GET_CODE (op1
) == MULT
2499 && GET_CODE (op0
) == NEG
)
2503 in1
= simplify_gen_unary (NEG
, mode
, XEXP (op1
, 0), mode
);
2504 in2
= XEXP (op1
, 1);
2505 return simplify_gen_binary (MINUS
, mode
,
2506 simplify_gen_binary (MULT
, mode
,
2511 /* If one of the operands is a PLUS or a MINUS, see if we can
2512 simplify this by the associative law. This will, for example,
2513 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2514 Don't use the associative law for floating point.
2515 The inaccuracy makes it nonassociative,
2516 and subtle programs can break if operations are associated. */
2518 if (INTEGRAL_MODE_P (mode
)
2519 && (plus_minus_operand_p (op0
)
2520 || plus_minus_operand_p (op1
))
2521 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
2526 if (trueop1
== constm1_rtx
)
2527 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2529 if (GET_CODE (op0
) == NEG
)
2531 rtx temp
= simplify_unary_operation (NEG
, mode
, op1
, mode
);
2532 /* If op1 is a MULT as well and simplify_unary_operation
2533 just moved the NEG to the second operand, simplify_gen_binary
2534 below could through simplify_associative_operation move
2535 the NEG around again and recurse endlessly. */
2537 && GET_CODE (op1
) == MULT
2538 && GET_CODE (temp
) == MULT
2539 && XEXP (op1
, 0) == XEXP (temp
, 0)
2540 && GET_CODE (XEXP (temp
, 1)) == NEG
2541 && XEXP (op1
, 1) == XEXP (XEXP (temp
, 1), 0))
2544 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), temp
);
2546 if (GET_CODE (op1
) == NEG
)
2548 rtx temp
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2549 /* If op0 is a MULT as well and simplify_unary_operation
2550 just moved the NEG to the second operand, simplify_gen_binary
2551 below could through simplify_associative_operation move
2552 the NEG around again and recurse endlessly. */
2554 && GET_CODE (op0
) == MULT
2555 && GET_CODE (temp
) == MULT
2556 && XEXP (op0
, 0) == XEXP (temp
, 0)
2557 && GET_CODE (XEXP (temp
, 1)) == NEG
2558 && XEXP (op0
, 1) == XEXP (XEXP (temp
, 1), 0))
2561 return simplify_gen_binary (MULT
, mode
, temp
, XEXP (op1
, 0));
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (CONST_DOUBLE_AS_INT_P (trueop1)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
          && (val < HOST_BITS_PER_DOUBLE_INT - 1
              || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
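
      /* For example, (mult:SI (reg:SI 100) (const_int 8)) becomes
         (ashift:SI (reg:SI 100) (const_int 3)).  The wide-multiplier variant
         covers constants whose only set bit sits in the high word of a
         CONST_DOUBLE, shifting by val + HOST_BITS_PER_WIDE_INT instead.
         (Register number chosen only for illustration.)  */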
2601 /* x*2 is x+x and x*(-1) is -x */
2602 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
2603 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1
))
2604 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1
))
2605 && GET_MODE (op0
) == mode
)
2608 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
2610 if (REAL_VALUES_EQUAL (d
, dconst2
))
2611 return simplify_gen_binary (PLUS
, mode
, op0
, copy_rtx (op0
));
2613 if (!HONOR_SNANS (mode
)
2614 && REAL_VALUES_EQUAL (d
, dconstm1
))
2615 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
2618 /* Optimize -x * -x as x * x. */
2619 if (FLOAT_MODE_P (mode
)
2620 && GET_CODE (op0
) == NEG
2621 && GET_CODE (op1
) == NEG
2622 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2623 && !side_effects_p (XEXP (op0
, 0)))
2624 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2626 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2627 if (SCALAR_FLOAT_MODE_P (mode
)
2628 && GET_CODE (op0
) == ABS
2629 && GET_CODE (op1
) == ABS
2630 && rtx_equal_p (XEXP (op0
, 0), XEXP (op1
, 0))
2631 && !side_effects_p (XEXP (op0
, 0)))
2632 return simplify_gen_binary (MULT
, mode
, XEXP (op0
, 0), XEXP (op1
, 0));
2634 /* Reassociate multiplication, but for floating point MULTs
2635 only when the user specifies unsafe math optimizations. */
2636 if (! FLOAT_MODE_P (mode
)
2637 || flag_unsafe_math_optimizations
)
2639 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2646 if (trueop1
== CONST0_RTX (mode
))
2648 if (INTEGRAL_MODE_P (mode
)
2649 && trueop1
== CONSTM1_RTX (mode
)
2650 && !side_effects_p (op0
))
2652 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2654 /* A | (~A) -> -1 */
2655 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2656 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2657 && ! side_effects_p (op0
)
2658 && SCALAR_INT_MODE_P (mode
))
2661 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2662 if (CONST_INT_P (op1
)
2663 && HWI_COMPUTABLE_MODE_P (mode
)
2664 && (nonzero_bits (op0
, mode
) & ~UINTVAL (op1
)) == 0
2665 && !side_effects_p (op0
))
2668 /* Canonicalize (X & C1) | C2. */
2669 if (GET_CODE (op0
) == AND
2670 && CONST_INT_P (trueop1
)
2671 && CONST_INT_P (XEXP (op0
, 1)))
2673 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
2674 HOST_WIDE_INT c1
= INTVAL (XEXP (op0
, 1));
2675 HOST_WIDE_INT c2
= INTVAL (trueop1
);
2677 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2679 && !side_effects_p (XEXP (op0
, 0)))
2682 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2683 if (((c1
|c2
) & mask
) == mask
)
2684 return simplify_gen_binary (IOR
, mode
, XEXP (op0
, 0), op1
);
2686 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2687 if (((c1
& ~c2
) & mask
) != (c1
& mask
))
2689 tem
= simplify_gen_binary (AND
, mode
, XEXP (op0
, 0),
2690 gen_int_mode (c1
& ~c2
, mode
));
2691 return simplify_gen_binary (IOR
, mode
, tem
, op1
);
2695 /* Convert (A & B) | A to A. */
2696 if (GET_CODE (op0
) == AND
2697 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2698 || rtx_equal_p (XEXP (op0
, 1), op1
))
2699 && ! side_effects_p (XEXP (op0
, 0))
2700 && ! side_effects_p (XEXP (op0
, 1)))
2703 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2704 mode size to (rotate A CX). */
2706 if (GET_CODE (op1
) == ASHIFT
2707 || GET_CODE (op1
) == SUBREG
)
2718 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2719 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2720 && CONST_INT_P (XEXP (opleft
, 1))
2721 && CONST_INT_P (XEXP (opright
, 1))
2722 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2723 == GET_MODE_PRECISION (mode
)))
2724 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
2726 /* Same, but for ashift that has been "simplified" to a wider mode
2727 by simplify_shift_const. */
2729 if (GET_CODE (opleft
) == SUBREG
2730 && GET_CODE (SUBREG_REG (opleft
)) == ASHIFT
2731 && GET_CODE (opright
) == LSHIFTRT
2732 && GET_CODE (XEXP (opright
, 0)) == SUBREG
2733 && GET_MODE (opleft
) == GET_MODE (XEXP (opright
, 0))
2734 && SUBREG_BYTE (opleft
) == SUBREG_BYTE (XEXP (opright
, 0))
2735 && (GET_MODE_SIZE (GET_MODE (opleft
))
2736 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft
))))
2737 && rtx_equal_p (XEXP (SUBREG_REG (opleft
), 0),
2738 SUBREG_REG (XEXP (opright
, 0)))
2739 && CONST_INT_P (XEXP (SUBREG_REG (opleft
), 1))
2740 && CONST_INT_P (XEXP (opright
, 1))
2741 && (INTVAL (XEXP (SUBREG_REG (opleft
), 1)) + INTVAL (XEXP (opright
, 1))
2742 == GET_MODE_PRECISION (mode
)))
2743 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0),
2744 XEXP (SUBREG_REG (opleft
), 1));
2746 /* If we have (ior (and (X C1) C2)), simplify this by making
2747 C1 as small as possible if C1 actually changes. */
2748 if (CONST_INT_P (op1
)
2749 && (HWI_COMPUTABLE_MODE_P (mode
)
2750 || INTVAL (op1
) > 0)
2751 && GET_CODE (op0
) == AND
2752 && CONST_INT_P (XEXP (op0
, 1))
2753 && CONST_INT_P (op1
)
2754 && (UINTVAL (XEXP (op0
, 1)) & UINTVAL (op1
)) != 0)
2755 return simplify_gen_binary (IOR
, mode
,
2757 (AND
, mode
, XEXP (op0
, 0),
2758 GEN_INT (UINTVAL (XEXP (op0
, 1))
2762 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2763 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2764 the PLUS does not affect any of the bits in OP1: then we can do
2765 the IOR as a PLUS and we can associate. This is valid if OP1
2766 can be safely shifted left C bits. */
2767 if (CONST_INT_P (trueop1
) && GET_CODE (op0
) == ASHIFTRT
2768 && GET_CODE (XEXP (op0
, 0)) == PLUS
2769 && CONST_INT_P (XEXP (XEXP (op0
, 0), 1))
2770 && CONST_INT_P (XEXP (op0
, 1))
2771 && INTVAL (XEXP (op0
, 1)) < HOST_BITS_PER_WIDE_INT
)
2773 int count
= INTVAL (XEXP (op0
, 1));
2774 HOST_WIDE_INT mask
= INTVAL (trueop1
) << count
;
2776 if (mask
>> count
== INTVAL (trueop1
)
2777 && (mask
& nonzero_bits (XEXP (op0
, 0), mode
)) == 0)
2778 return simplify_gen_binary (ASHIFTRT
, mode
,
2779 plus_constant (mode
, XEXP (op0
, 0),
2784 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2790 if (trueop1
== CONST0_RTX (mode
))
2792 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2793 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
2794 if (rtx_equal_p (trueop0
, trueop1
)
2795 && ! side_effects_p (op0
)
2796 && GET_MODE_CLASS (mode
) != MODE_CC
)
2797 return CONST0_RTX (mode
);
2799 /* Canonicalize XOR of the most significant bit to PLUS. */
2800 if (CONST_SCALAR_INT_P (op1
)
2801 && mode_signbit_p (mode
, op1
))
2802 return simplify_gen_binary (PLUS
, mode
, op0
, op1
);
2803 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2804 if (CONST_SCALAR_INT_P (op1
)
2805 && GET_CODE (op0
) == PLUS
2806 && CONST_SCALAR_INT_P (XEXP (op0
, 1))
2807 && mode_signbit_p (mode
, XEXP (op0
, 1)))
2808 return simplify_gen_binary (XOR
, mode
, XEXP (op0
, 0),
2809 simplify_gen_binary (XOR
, mode
, op1
,
2812 /* If we are XORing two things that have no bits in common,
2813 convert them into an IOR. This helps to detect rotation encoded
2814 using those methods and possibly other simplifications. */
2816 if (HWI_COMPUTABLE_MODE_P (mode
)
2817 && (nonzero_bits (op0
, mode
)
2818 & nonzero_bits (op1
, mode
)) == 0)
2819 return (simplify_gen_binary (IOR
, mode
, op0
, op1
));
2821 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2822 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2825 int num_negated
= 0;
2827 if (GET_CODE (op0
) == NOT
)
2828 num_negated
++, op0
= XEXP (op0
, 0);
2829 if (GET_CODE (op1
) == NOT
)
2830 num_negated
++, op1
= XEXP (op1
, 0);
2832 if (num_negated
== 2)
2833 return simplify_gen_binary (XOR
, mode
, op0
, op1
);
2834 else if (num_negated
== 1)
2835 return simplify_gen_unary (NOT
, mode
,
2836 simplify_gen_binary (XOR
, mode
, op0
, op1
),
2840 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2841 correspond to a machine insn or result in further simplifications
2842 if B is a constant. */
2844 if (GET_CODE (op0
) == AND
2845 && rtx_equal_p (XEXP (op0
, 1), op1
)
2846 && ! side_effects_p (op1
))
2847 return simplify_gen_binary (AND
, mode
,
2848 simplify_gen_unary (NOT
, mode
,
2849 XEXP (op0
, 0), mode
),
2852 else if (GET_CODE (op0
) == AND
2853 && rtx_equal_p (XEXP (op0
, 0), op1
)
2854 && ! side_effects_p (op1
))
2855 return simplify_gen_binary (AND
, mode
,
2856 simplify_gen_unary (NOT
, mode
,
2857 XEXP (op0
, 1), mode
),
2860 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2861 we can transform like this:
2862 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2863 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2864 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2865 Attempt a few simplifications when B and C are both constants. */
2866 if (GET_CODE (op0
) == AND
2867 && CONST_INT_P (op1
)
2868 && CONST_INT_P (XEXP (op0
, 1)))
2870 rtx a
= XEXP (op0
, 0);
2871 rtx b
= XEXP (op0
, 1);
2873 HOST_WIDE_INT bval
= INTVAL (b
);
2874 HOST_WIDE_INT cval
= INTVAL (c
);
2877 = simplify_binary_operation (AND
, mode
,
2878 simplify_gen_unary (NOT
, mode
, a
, mode
),
2880 if ((~cval
& bval
) == 0)
2882 /* Try to simplify ~A&C | ~B&C. */
2883 if (na_c
!= NULL_RTX
)
2884 return simplify_gen_binary (IOR
, mode
, na_c
,
2885 GEN_INT (~bval
& cval
));
2889 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
2890 if (na_c
== const0_rtx
)
2892 rtx a_nc_b
= simplify_gen_binary (AND
, mode
, a
,
2893 GEN_INT (~cval
& bval
));
2894 return simplify_gen_binary (IOR
, mode
, a_nc_b
,
2895 GEN_INT (~bval
& cval
));
2900 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2901 comparison if STORE_FLAG_VALUE is 1. */
2902 if (STORE_FLAG_VALUE
== 1
2903 && trueop1
== const1_rtx
2904 && COMPARISON_P (op0
)
2905 && (reversed
= reversed_comparison (op0
, mode
)))
2908 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2909 is (lt foo (const_int 0)), so we can perform the above
2910 simplification if STORE_FLAG_VALUE is 1. */
2912 if (STORE_FLAG_VALUE
== 1
2913 && trueop1
== const1_rtx
2914 && GET_CODE (op0
) == LSHIFTRT
2915 && CONST_INT_P (XEXP (op0
, 1))
2916 && INTVAL (XEXP (op0
, 1)) == GET_MODE_PRECISION (mode
) - 1)
2917 return gen_rtx_GE (mode
, XEXP (op0
, 0), const0_rtx
);
2919 /* (xor (comparison foo bar) (const_int sign-bit))
2920 when STORE_FLAG_VALUE is the sign bit. */
2921 if (val_signbit_p (mode
, STORE_FLAG_VALUE
)
2922 && trueop1
== const_true_rtx
2923 && COMPARISON_P (op0
)
2924 && (reversed
= reversed_comparison (op0
, mode
)))
2927 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
2933 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
2935 if (INTEGRAL_MODE_P (mode
) && trueop1
== CONSTM1_RTX (mode
))
2937 if (HWI_COMPUTABLE_MODE_P (mode
))
2939 HOST_WIDE_INT nzop0
= nonzero_bits (trueop0
, mode
);
2940 HOST_WIDE_INT nzop1
;
2941 if (CONST_INT_P (trueop1
))
2943 HOST_WIDE_INT val1
= INTVAL (trueop1
);
2944 /* If we are turning off bits already known off in OP0, we need
2946 if ((nzop0
& ~val1
) == 0)
2949 nzop1
= nonzero_bits (trueop1
, mode
);
2950 /* If we are clearing all the nonzero bits, the result is zero. */
2951 if ((nzop1
& nzop0
) == 0
2952 && !side_effects_p (op0
) && !side_effects_p (op1
))
2953 return CONST0_RTX (mode
);
2955 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
)
2956 && GET_MODE_CLASS (mode
) != MODE_CC
)
2959 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2960 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2961 && ! side_effects_p (op0
)
2962 && GET_MODE_CLASS (mode
) != MODE_CC
)
2963 return CONST0_RTX (mode
);
2965 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2966 there are no nonzero bits of C outside of X's mode. */
2967 if ((GET_CODE (op0
) == SIGN_EXTEND
2968 || GET_CODE (op0
) == ZERO_EXTEND
)
2969 && CONST_INT_P (trueop1
)
2970 && HWI_COMPUTABLE_MODE_P (mode
)
2971 && (~GET_MODE_MASK (GET_MODE (XEXP (op0
, 0)))
2972 & UINTVAL (trueop1
)) == 0)
2974 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
2975 tem
= simplify_gen_binary (AND
, imode
, XEXP (op0
, 0),
2976 gen_int_mode (INTVAL (trueop1
),
2978 return simplify_gen_unary (ZERO_EXTEND
, mode
, tem
, imode
);
2981 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2982 we might be able to further simplify the AND with X and potentially
2983 remove the truncation altogether. */
2984 if (GET_CODE (op0
) == TRUNCATE
&& CONST_INT_P (trueop1
))
2986 rtx x
= XEXP (op0
, 0);
2987 enum machine_mode xmode
= GET_MODE (x
);
2988 tem
= simplify_gen_binary (AND
, xmode
, x
,
2989 gen_int_mode (INTVAL (trueop1
), xmode
));
2990 return simplify_gen_unary (TRUNCATE
, mode
, tem
, xmode
);
2993 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2994 if (GET_CODE (op0
) == IOR
2995 && CONST_INT_P (trueop1
)
2996 && CONST_INT_P (XEXP (op0
, 1)))
2998 HOST_WIDE_INT tmp
= INTVAL (trueop1
) & INTVAL (XEXP (op0
, 1));
2999 return simplify_gen_binary (IOR
, mode
,
3000 simplify_gen_binary (AND
, mode
,
3001 XEXP (op0
, 0), op1
),
3002 gen_int_mode (tmp
, mode
));
3005 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3006 insn (and may simplify more). */
3007 if (GET_CODE (op0
) == XOR
3008 && rtx_equal_p (XEXP (op0
, 0), op1
)
3009 && ! side_effects_p (op1
))
3010 return simplify_gen_binary (AND
, mode
,
3011 simplify_gen_unary (NOT
, mode
,
3012 XEXP (op0
, 1), mode
),
3015 if (GET_CODE (op0
) == XOR
3016 && rtx_equal_p (XEXP (op0
, 1), op1
)
3017 && ! side_effects_p (op1
))
3018 return simplify_gen_binary (AND
, mode
,
3019 simplify_gen_unary (NOT
, mode
,
3020 XEXP (op0
, 0), mode
),
3023 /* Similarly for (~(A ^ B)) & A. */
3024 if (GET_CODE (op0
) == NOT
3025 && GET_CODE (XEXP (op0
, 0)) == XOR
3026 && rtx_equal_p (XEXP (XEXP (op0
, 0), 0), op1
)
3027 && ! side_effects_p (op1
))
3028 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 1), op1
);
3030 if (GET_CODE (op0
) == NOT
3031 && GET_CODE (XEXP (op0
, 0)) == XOR
3032 && rtx_equal_p (XEXP (XEXP (op0
, 0), 1), op1
)
3033 && ! side_effects_p (op1
))
3034 return simplify_gen_binary (AND
, mode
, XEXP (XEXP (op0
, 0), 0), op1
);
3036 /* Convert (A | B) & A to A. */
3037 if (GET_CODE (op0
) == IOR
3038 && (rtx_equal_p (XEXP (op0
, 0), op1
)
3039 || rtx_equal_p (XEXP (op0
, 1), op1
))
3040 && ! side_effects_p (XEXP (op0
, 0))
3041 && ! side_effects_p (XEXP (op0
, 1)))
3044 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3045 ((A & N) + B) & M -> (A + B) & M
3046 Similarly if (N & M) == 0,
3047 ((A | N) + B) & M -> (A + B) & M
3048 and for - instead of + and/or ^ instead of |.
3049 Also, if (N & M) == 0, then
3050 (A +- N) & M -> A & M. */
3051 if (CONST_INT_P (trueop1
)
3052 && HWI_COMPUTABLE_MODE_P (mode
)
3053 && ~UINTVAL (trueop1
)
3054 && (UINTVAL (trueop1
) & (UINTVAL (trueop1
) + 1)) == 0
3055 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
))
3060 pmop
[0] = XEXP (op0
, 0);
3061 pmop
[1] = XEXP (op0
, 1);
3063 if (CONST_INT_P (pmop
[1])
3064 && (UINTVAL (pmop
[1]) & UINTVAL (trueop1
)) == 0)
3065 return simplify_gen_binary (AND
, mode
, pmop
[0], op1
);
3067 for (which
= 0; which
< 2; which
++)
3070 switch (GET_CODE (tem
))
3073 if (CONST_INT_P (XEXP (tem
, 1))
3074 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
))
3075 == UINTVAL (trueop1
))
3076 pmop
[which
] = XEXP (tem
, 0);
3080 if (CONST_INT_P (XEXP (tem
, 1))
3081 && (UINTVAL (XEXP (tem
, 1)) & UINTVAL (trueop1
)) == 0)
3082 pmop
[which
] = XEXP (tem
, 0);
3089 if (pmop
[0] != XEXP (op0
, 0) || pmop
[1] != XEXP (op0
, 1))
3091 tem
= simplify_gen_binary (GET_CODE (op0
), mode
,
3093 return simplify_gen_binary (code
, mode
, tem
, op1
);
3097 /* (and X (ior (not X) Y) -> (and X Y) */
3098 if (GET_CODE (op1
) == IOR
3099 && GET_CODE (XEXP (op1
, 0)) == NOT
3100 && op0
== XEXP (XEXP (op1
, 0), 0))
3101 return simplify_gen_binary (AND
, mode
, op0
, XEXP (op1
, 1));
3103 /* (and (ior (not X) Y) X) -> (and X Y) */
3104 if (GET_CODE (op0
) == IOR
3105 && GET_CODE (XEXP (op0
, 0)) == NOT
3106 && op1
== XEXP (XEXP (op0
, 0), 0))
3107 return simplify_gen_binary (AND
, mode
, op1
, XEXP (op0
, 1));
3109 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3115 /* 0/x is 0 (or x&0 if x has side-effects). */
3116 if (trueop0
== CONST0_RTX (mode
))
3118 if (side_effects_p (op1
))
3119 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3123 if (trueop1
== CONST1_RTX (mode
))
3125 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3129 /* Convert divide by power of two into shift. */
3130 if (CONST_INT_P (trueop1
)
3131 && (val
= exact_log2 (UINTVAL (trueop1
))) > 0)
3132 return simplify_gen_binary (LSHIFTRT
, mode
, op0
, GEN_INT (val
));
3136 /* Handle floating point and integers separately. */
3137 if (SCALAR_FLOAT_MODE_P (mode
))
3139 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3140 safe for modes with NaNs, since 0.0 / 0.0 will then be
3141 NaN rather than 0.0. Nor is it safe for modes with signed
3142 zeros, since dividing 0 by a negative number gives -0.0 */
3143 if (trueop0
== CONST0_RTX (mode
)
3144 && !HONOR_NANS (mode
)
3145 && !HONOR_SIGNED_ZEROS (mode
)
3146 && ! side_effects_p (op1
))
3149 if (trueop1
== CONST1_RTX (mode
)
3150 && !HONOR_SNANS (mode
))
3153 if (CONST_DOUBLE_AS_FLOAT_P (trueop1
)
3154 && trueop1
!= CONST0_RTX (mode
))
3157 REAL_VALUE_FROM_CONST_DOUBLE (d
, trueop1
);
3160 if (REAL_VALUES_EQUAL (d
, dconstm1
)
3161 && !HONOR_SNANS (mode
))
3162 return simplify_gen_unary (NEG
, mode
, op0
, mode
);
3164 /* Change FP division by a constant into multiplication.
3165 Only do this with -freciprocal-math. */
3166 if (flag_reciprocal_math
3167 && !REAL_VALUES_EQUAL (d
, dconst0
))
3169 REAL_ARITHMETIC (d
, RDIV_EXPR
, dconst1
, d
);
3170 tem
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
3171 return simplify_gen_binary (MULT
, mode
, op0
, tem
);
3175 else if (SCALAR_INT_MODE_P (mode
))
3177 /* 0/x is 0 (or x&0 if x has side-effects). */
3178 if (trueop0
== CONST0_RTX (mode
)
3179 && !cfun
->can_throw_non_call_exceptions
)
3181 if (side_effects_p (op1
))
3182 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3186 if (trueop1
== CONST1_RTX (mode
))
3188 tem
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3193 if (trueop1
== constm1_rtx
)
3195 rtx x
= rtl_hooks
.gen_lowpart_no_emit (mode
, op0
);
3197 return simplify_gen_unary (NEG
, mode
, x
, mode
);
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
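
      /* So (umod:SI (reg:SI 100) (const_int 8)) folds to
         (and:SI (reg:SI 100) (const_int 7)).  The exact_log2 test requires a
         result strictly greater than 0, since a modulus of 1 is already
         handled by the x%1 rule above.  */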
3225 /* 0%x is 0 (or x&0 if x has side-effects). */
3226 if (trueop0
== CONST0_RTX (mode
))
3228 if (side_effects_p (op1
))
3229 return simplify_gen_binary (AND
, mode
, op1
, trueop0
);
3232 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
3233 if (trueop1
== CONST1_RTX (mode
) || trueop1
== constm1_rtx
)
3235 if (side_effects_p (op0
))
3236 return simplify_gen_binary (AND
, mode
, op0
, CONST0_RTX (mode
));
3237 return CONST0_RTX (mode
);
3244 if (trueop1
== CONST0_RTX (mode
))
3246 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3248 /* Rotating ~0 always results in ~0. */
3249 if (CONST_INT_P (trueop0
) && width
<= HOST_BITS_PER_WIDE_INT
3250 && UINTVAL (trueop0
) == GET_MODE_MASK (mode
)
3251 && ! side_effects_p (op1
))
3254 if (SHIFT_COUNT_TRUNCATED
&& CONST_INT_P (op1
))
3256 val
= INTVAL (op1
) & (GET_MODE_BITSIZE (mode
) - 1);
3257 if (val
!= INTVAL (op1
))
3258 return simplify_gen_binary (code
, mode
, op0
, GEN_INT (val
));
3265 if (trueop1
== CONST0_RTX (mode
))
3267 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3269 goto canonicalize_shift
;
3272 if (trueop1
== CONST0_RTX (mode
))
3274 if (trueop0
== CONST0_RTX (mode
) && ! side_effects_p (op1
))
3276 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
3277 if (GET_CODE (op0
) == CLZ
3278 && CONST_INT_P (trueop1
)
3279 && STORE_FLAG_VALUE
== 1
3280 && INTVAL (trueop1
) < (HOST_WIDE_INT
)width
)
3282 enum machine_mode imode
= GET_MODE (XEXP (op0
, 0));
3283 unsigned HOST_WIDE_INT zero_val
= 0;
3285 if (CLZ_DEFINED_VALUE_AT_ZERO (imode
, zero_val
)
3286 && zero_val
== GET_MODE_PRECISION (imode
)
3287 && INTVAL (trueop1
) == exact_log2 (zero_val
))
3288 return simplify_gen_relational (EQ
, mode
, imode
,
3289 XEXP (op0
, 0), const0_rtx
);
3291 goto canonicalize_shift
;
3294 if (width
<= HOST_BITS_PER_WIDE_INT
3295 && mode_signbit_p (mode
, trueop1
)
3296 && ! side_effects_p (op0
))
3298 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3300 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3306 if (width
<= HOST_BITS_PER_WIDE_INT
3307 && CONST_INT_P (trueop1
)
3308 && (UINTVAL (trueop1
) == GET_MODE_MASK (mode
) >> 1)
3309 && ! side_effects_p (op0
))
3311 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3313 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3319 if (trueop1
== CONST0_RTX (mode
) && ! side_effects_p (op0
))
3321 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3323 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3329 if (trueop1
== constm1_rtx
&& ! side_effects_p (op0
))
3331 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
3333 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
3346 /* ??? There are simplifications that can be done. */
3350 if (!VECTOR_MODE_P (mode
))
3352 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3353 gcc_assert (mode
== GET_MODE_INNER (GET_MODE (trueop0
)));
3354 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3355 gcc_assert (XVECLEN (trueop1
, 0) == 1);
3356 gcc_assert (CONST_INT_P (XVECEXP (trueop1
, 0, 0)));
3358 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3359 return CONST_VECTOR_ELT (trueop0
, INTVAL (XVECEXP
3362 /* Extract a scalar element from a nested VEC_SELECT expression
3363 (with optional nested VEC_CONCAT expression). Some targets
3364 (i386) extract scalar element from a vector using chain of
3365 nested VEC_SELECT expressions. When input operand is a memory
3366 operand, this operation can be simplified to a simple scalar
3367 load from an offseted memory address. */
3368 if (GET_CODE (trueop0
) == VEC_SELECT
)
3370 rtx op0
= XEXP (trueop0
, 0);
3371 rtx op1
= XEXP (trueop0
, 1);
3373 enum machine_mode opmode
= GET_MODE (op0
);
3374 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (opmode
));
3375 int n_elts
= GET_MODE_SIZE (opmode
) / elt_size
;
3377 int i
= INTVAL (XVECEXP (trueop1
, 0, 0));
3383 gcc_assert (GET_CODE (op1
) == PARALLEL
);
3384 gcc_assert (i
< n_elts
);
3386 /* Select element, pointed by nested selector. */
3387 elem
= INTVAL (XVECEXP (op1
, 0, i
));
3389 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3390 if (GET_CODE (op0
) == VEC_CONCAT
)
3392 rtx op00
= XEXP (op0
, 0);
3393 rtx op01
= XEXP (op0
, 1);
3395 enum machine_mode mode00
, mode01
;
3396 int n_elts00
, n_elts01
;
3398 mode00
= GET_MODE (op00
);
3399 mode01
= GET_MODE (op01
);
3401 /* Find out number of elements of each operand. */
3402 if (VECTOR_MODE_P (mode00
))
3404 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode00
));
3405 n_elts00
= GET_MODE_SIZE (mode00
) / elt_size
;
3410 if (VECTOR_MODE_P (mode01
))
3412 elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode01
));
3413 n_elts01
= GET_MODE_SIZE (mode01
) / elt_size
;
3418 gcc_assert (n_elts
== n_elts00
+ n_elts01
);
3420 /* Select correct operand of VEC_CONCAT
3421 and adjust selector. */
3422 if (elem
< n_elts01
)
3433 vec
= rtvec_alloc (1);
3434 RTVEC_ELT (vec
, 0) = GEN_INT (elem
);
3436 tmp
= gen_rtx_fmt_ee (code
, mode
,
3437 tmp_op
, gen_rtx_PARALLEL (VOIDmode
, vec
));
3440 if (GET_CODE (trueop0
) == VEC_DUPLICATE
3441 && GET_MODE (XEXP (trueop0
, 0)) == mode
)
3442 return XEXP (trueop0
, 0);
3446 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0
)));
3447 gcc_assert (GET_MODE_INNER (mode
)
3448 == GET_MODE_INNER (GET_MODE (trueop0
)));
3449 gcc_assert (GET_CODE (trueop1
) == PARALLEL
);
3451 if (GET_CODE (trueop0
) == CONST_VECTOR
)
3453 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3454 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3455 rtvec v
= rtvec_alloc (n_elts
);
3458 gcc_assert (XVECLEN (trueop1
, 0) == (int) n_elts
);
3459 for (i
= 0; i
< n_elts
; i
++)
3461 rtx x
= XVECEXP (trueop1
, 0, i
);
3463 gcc_assert (CONST_INT_P (x
));
3464 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
,
3468 return gen_rtx_CONST_VECTOR (mode
, v
);
3471 /* Recognize the identity. */
3472 if (GET_MODE (trueop0
) == mode
)
3474 bool maybe_ident
= true;
3475 for (int i
= 0; i
< XVECLEN (trueop1
, 0); i
++)
3477 rtx j
= XVECEXP (trueop1
, 0, i
);
3478 if (!CONST_INT_P (j
) || INTVAL (j
) != i
)
3480 maybe_ident
= false;
3488 /* If we build {a,b} then permute it, build the result directly. */
3489 if (XVECLEN (trueop1
, 0) == 2
3490 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3491 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3492 && GET_CODE (trueop0
) == VEC_CONCAT
3493 && GET_CODE (XEXP (trueop0
, 0)) == VEC_CONCAT
3494 && GET_MODE (XEXP (trueop0
, 0)) == mode
3495 && GET_CODE (XEXP (trueop0
, 1)) == VEC_CONCAT
3496 && GET_MODE (XEXP (trueop0
, 1)) == mode
)
3498 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3499 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3502 gcc_assert (i0
< 4 && i1
< 4);
3503 subop0
= XEXP (XEXP (trueop0
, i0
/ 2), i0
% 2);
3504 subop1
= XEXP (XEXP (trueop0
, i1
/ 2), i1
% 2);
3506 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3509 if (XVECLEN (trueop1
, 0) == 2
3510 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3511 && CONST_INT_P (XVECEXP (trueop1
, 0, 1))
3512 && GET_CODE (trueop0
) == VEC_CONCAT
3513 && GET_MODE (trueop0
) == mode
)
3515 unsigned int i0
= INTVAL (XVECEXP (trueop1
, 0, 0));
3516 unsigned int i1
= INTVAL (XVECEXP (trueop1
, 0, 1));
3519 gcc_assert (i0
< 2 && i1
< 2);
3520 subop0
= XEXP (trueop0
, i0
);
3521 subop1
= XEXP (trueop0
, i1
);
3523 return simplify_gen_binary (VEC_CONCAT
, mode
, subop0
, subop1
);
3527 if (XVECLEN (trueop1
, 0) == 1
3528 && CONST_INT_P (XVECEXP (trueop1
, 0, 0))
3529 && GET_CODE (trueop0
) == VEC_CONCAT
)
3532 int offset
= INTVAL (XVECEXP (trueop1
, 0, 0)) * GET_MODE_SIZE (mode
);
3534 /* Try to find the element in the VEC_CONCAT. */
3535 while (GET_MODE (vec
) != mode
3536 && GET_CODE (vec
) == VEC_CONCAT
)
3538 HOST_WIDE_INT vec_size
= GET_MODE_SIZE (GET_MODE (XEXP (vec
, 0)));
3539 if (offset
< vec_size
)
3540 vec
= XEXP (vec
, 0);
3544 vec
= XEXP (vec
, 1);
3546 vec
= avoid_constant_pool_reference (vec
);
3549 if (GET_MODE (vec
) == mode
)
3556 enum machine_mode op0_mode
= (GET_MODE (trueop0
) != VOIDmode
3557 ? GET_MODE (trueop0
)
3558 : GET_MODE_INNER (mode
));
3559 enum machine_mode op1_mode
= (GET_MODE (trueop1
) != VOIDmode
3560 ? GET_MODE (trueop1
)
3561 : GET_MODE_INNER (mode
));
3563 gcc_assert (VECTOR_MODE_P (mode
));
3564 gcc_assert (GET_MODE_SIZE (op0_mode
) + GET_MODE_SIZE (op1_mode
)
3565 == GET_MODE_SIZE (mode
));
3567 if (VECTOR_MODE_P (op0_mode
))
3568 gcc_assert (GET_MODE_INNER (mode
)
3569 == GET_MODE_INNER (op0_mode
));
3571 gcc_assert (GET_MODE_INNER (mode
) == op0_mode
);
3573 if (VECTOR_MODE_P (op1_mode
))
3574 gcc_assert (GET_MODE_INNER (mode
)
3575 == GET_MODE_INNER (op1_mode
));
3577 gcc_assert (GET_MODE_INNER (mode
) == op1_mode
);
3579 if ((GET_CODE (trueop0
) == CONST_VECTOR
3580 || CONST_SCALAR_INT_P (trueop0
)
3581 || CONST_DOUBLE_AS_FLOAT_P (trueop0
))
3582 && (GET_CODE (trueop1
) == CONST_VECTOR
3583 || CONST_SCALAR_INT_P (trueop1
)
3584 || CONST_DOUBLE_AS_FLOAT_P (trueop1
)))
3586 int elt_size
= GET_MODE_SIZE (GET_MODE_INNER (mode
));
3587 unsigned n_elts
= (GET_MODE_SIZE (mode
) / elt_size
);
3588 rtvec v
= rtvec_alloc (n_elts
);
3590 unsigned in_n_elts
= 1;
3592 if (VECTOR_MODE_P (op0_mode
))
3593 in_n_elts
= (GET_MODE_SIZE (op0_mode
) / elt_size
);
3594 for (i
= 0; i
< n_elts
; i
++)
3598 if (!VECTOR_MODE_P (op0_mode
))
3599 RTVEC_ELT (v
, i
) = trueop0
;
3601 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop0
, i
);
3605 if (!VECTOR_MODE_P (op1_mode
))
3606 RTVEC_ELT (v
, i
) = trueop1
;
3608 RTVEC_ELT (v
, i
) = CONST_VECTOR_ELT (trueop1
,
3613 return gen_rtx_CONST_VECTOR (mode
, v
);
3616 /* Try to merge VEC_SELECTs from the same vector into a single one. */
3617 if (GET_CODE (trueop0
) == VEC_SELECT
3618 && GET_CODE (trueop1
) == VEC_SELECT
3619 && rtx_equal_p (XEXP (trueop0
, 0), XEXP (trueop1
, 0)))
3621 rtx par0
= XEXP (trueop0
, 1);
3622 rtx par1
= XEXP (trueop1
, 1);
3623 int len0
= XVECLEN (par0
, 0);
3624 int len1
= XVECLEN (par1
, 0);
3625 rtvec vec
= rtvec_alloc (len0
+ len1
);
3626 for (int i
= 0; i
< len0
; i
++)
3627 RTVEC_ELT (vec
, i
) = XVECEXP (par0
, 0, i
);
3628 for (int i
= 0; i
< len1
; i
++)
3629 RTVEC_ELT (vec
, len0
+ i
) = XVECEXP (par1
, 0, i
);
3630 return simplify_gen_binary (VEC_SELECT
, mode
, XEXP (trueop0
, 0),
3631 gen_rtx_PARALLEL (VOIDmode
, vec
));
3644 simplify_const_binary_operation (enum rtx_code code
, enum machine_mode mode
,
3647 HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
3649 unsigned int width
= GET_MODE_PRECISION (mode
);
3651 if (VECTOR_MODE_P (mode
)
3652 && code
!= VEC_CONCAT
3653 && GET_CODE (op0
) == CONST_VECTOR
3654 && GET_CODE (op1
) == CONST_VECTOR
)
3656 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3657 enum machine_mode op0mode
= GET_MODE (op0
);
3658 unsigned op0_n_elts
= GET_MODE_NUNITS (op0mode
);
3659 enum machine_mode op1mode
= GET_MODE (op1
);
3660 unsigned op1_n_elts
= GET_MODE_NUNITS (op1mode
);
3661 rtvec v
= rtvec_alloc (n_elts
);
3664 gcc_assert (op0_n_elts
== n_elts
);
3665 gcc_assert (op1_n_elts
== n_elts
);
3666 for (i
= 0; i
< n_elts
; i
++)
3668 rtx x
= simplify_binary_operation (code
, GET_MODE_INNER (mode
),
3669 CONST_VECTOR_ELT (op0
, i
),
3670 CONST_VECTOR_ELT (op1
, i
));
3673 RTVEC_ELT (v
, i
) = x
;
3676 return gen_rtx_CONST_VECTOR (mode
, v
);
3679 if (VECTOR_MODE_P (mode
)
3680 && code
== VEC_CONCAT
3681 && (CONST_SCALAR_INT_P (op0
)
3682 || GET_CODE (op0
) == CONST_FIXED
3683 || CONST_DOUBLE_AS_FLOAT_P (op0
))
3684 && (CONST_SCALAR_INT_P (op1
)
3685 || CONST_DOUBLE_AS_FLOAT_P (op1
)
3686 || GET_CODE (op1
) == CONST_FIXED
))
3688 unsigned n_elts
= GET_MODE_NUNITS (mode
);
3689 rtvec v
= rtvec_alloc (n_elts
);
3691 gcc_assert (n_elts
>= 2);
3694 gcc_assert (GET_CODE (op0
) != CONST_VECTOR
);
3695 gcc_assert (GET_CODE (op1
) != CONST_VECTOR
);
3697 RTVEC_ELT (v
, 0) = op0
;
3698 RTVEC_ELT (v
, 1) = op1
;
3702 unsigned op0_n_elts
= GET_MODE_NUNITS (GET_MODE (op0
));
3703 unsigned op1_n_elts
= GET_MODE_NUNITS (GET_MODE (op1
));
3706 gcc_assert (GET_CODE (op0
) == CONST_VECTOR
);
3707 gcc_assert (GET_CODE (op1
) == CONST_VECTOR
);
3708 gcc_assert (op0_n_elts
+ op1_n_elts
== n_elts
);
3710 for (i
= 0; i
< op0_n_elts
; ++i
)
3711 RTVEC_ELT (v
, i
) = XVECEXP (op0
, 0, i
);
3712 for (i
= 0; i
< op1_n_elts
; ++i
)
3713 RTVEC_ELT (v
, op0_n_elts
+i
) = XVECEXP (op1
, 0, i
);
3716 return gen_rtx_CONST_VECTOR (mode
, v
);
3719 if (SCALAR_FLOAT_MODE_P (mode
)
3720 && CONST_DOUBLE_AS_FLOAT_P (op0
)
3721 && CONST_DOUBLE_AS_FLOAT_P (op1
)
3722 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
3733 real_to_target (tmp0
, CONST_DOUBLE_REAL_VALUE (op0
),
3735 real_to_target (tmp1
, CONST_DOUBLE_REAL_VALUE (op1
),
3737 for (i
= 0; i
< 4; i
++)
3754 real_from_target (&r
, tmp0
, mode
);
3755 return CONST_DOUBLE_FROM_REAL_VALUE (r
, mode
);
3759 REAL_VALUE_TYPE f0
, f1
, value
, result
;
3762 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
3763 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
3764 real_convert (&f0
, mode
, &f0
);
3765 real_convert (&f1
, mode
, &f1
);
3767 if (HONOR_SNANS (mode
)
3768 && (REAL_VALUE_ISNAN (f0
) || REAL_VALUE_ISNAN (f1
)))
3772 && REAL_VALUES_EQUAL (f1
, dconst0
)
3773 && (flag_trapping_math
|| ! MODE_HAS_INFINITIES (mode
)))
3776 if (MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3777 && flag_trapping_math
3778 && REAL_VALUE_ISINF (f0
) && REAL_VALUE_ISINF (f1
))
3780 int s0
= REAL_VALUE_NEGATIVE (f0
);
3781 int s1
= REAL_VALUE_NEGATIVE (f1
);
3786 /* Inf + -Inf = NaN plus exception. */
3791 /* Inf - Inf = NaN plus exception. */
3796 /* Inf / Inf = NaN plus exception. */
3803 if (code
== MULT
&& MODE_HAS_INFINITIES (mode
) && HONOR_NANS (mode
)
3804 && flag_trapping_math
3805 && ((REAL_VALUE_ISINF (f0
) && REAL_VALUES_EQUAL (f1
, dconst0
))
3806 || (REAL_VALUE_ISINF (f1
)
3807 && REAL_VALUES_EQUAL (f0
, dconst0
))))
3808 /* Inf * 0 = NaN plus exception. */
3811 inexact
= real_arithmetic (&value
, rtx_to_tree_code (code
),
3813 real_convert (&result
, mode
, &value
);
3815 /* Don't constant fold this floating point operation if
3816 the result has overflowed and flag_trapping_math. */
3818 if (flag_trapping_math
3819 && MODE_HAS_INFINITIES (mode
)
3820 && REAL_VALUE_ISINF (result
)
3821 && !REAL_VALUE_ISINF (f0
)
3822 && !REAL_VALUE_ISINF (f1
))
3823 /* Overflow plus exception. */
3826 /* Don't constant fold this floating point operation if the
3827 result may dependent upon the run-time rounding mode and
3828 flag_rounding_math is set, or if GCC's software emulation
3829 is unable to accurately represent the result. */
3831 if ((flag_rounding_math
3832 || (MODE_COMPOSITE_P (mode
) && !flag_unsafe_math_optimizations
))
3833 && (inexact
|| !real_identical (&result
, &value
)))
3836 return CONST_DOUBLE_FROM_REAL_VALUE (result
, mode
);
3840 /* We can fold some multi-word operations. */
3841 if (GET_MODE_CLASS (mode
) == MODE_INT
3842 && width
== HOST_BITS_PER_DOUBLE_INT
3843 && (CONST_DOUBLE_AS_INT_P (op0
) || CONST_INT_P (op0
))
3844 && (CONST_DOUBLE_AS_INT_P (op1
) || CONST_INT_P (op1
)))
3846 double_int o0
, o1
, res
, tmp
;
3849 o0
= rtx_to_double_int (op0
);
3850 o1
= rtx_to_double_int (op1
);
3855 /* A - B == A + (-B). */
3858 /* Fall through.... */
3869 res
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
3876 tmp
= o0
.divmod_with_overflow (o1
, false, TRUNC_DIV_EXPR
,
3883 res
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
3890 tmp
= o0
.divmod_with_overflow (o1
, true, TRUNC_DIV_EXPR
,
3924 case LSHIFTRT
: case ASHIFTRT
:
3926 case ROTATE
: case ROTATERT
:
3928 unsigned HOST_WIDE_INT cnt
;
3930 if (SHIFT_COUNT_TRUNCATED
)
3933 o1
.low
&= GET_MODE_PRECISION (mode
) - 1;
3936 if (!o1
.fits_uhwi ()
3937 || o1
.to_uhwi () >= GET_MODE_PRECISION (mode
))
3940 cnt
= o1
.to_uhwi ();
3941 unsigned short prec
= GET_MODE_PRECISION (mode
);
3943 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
3944 res
= o0
.rshift (cnt
, prec
, code
== ASHIFTRT
);
3945 else if (code
== ASHIFT
)
3946 res
= o0
.alshift (cnt
, prec
);
3947 else if (code
== ROTATE
)
3948 res
= o0
.lrotate (cnt
, prec
);
3949 else /* code == ROTATERT */
3950 res
= o0
.rrotate (cnt
, prec
);
3958 return immed_double_int_const (res
, mode
);
3961 if (CONST_INT_P (op0
) && CONST_INT_P (op1
)
3962 && width
<= HOST_BITS_PER_WIDE_INT
&& width
!= 0)
3964 /* Get the integer argument values in two forms:
3965 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3967 arg0
= INTVAL (op0
);
3968 arg1
= INTVAL (op1
);
3970 if (width
< HOST_BITS_PER_WIDE_INT
)
3972 arg0
&= GET_MODE_MASK (mode
);
3973 arg1
&= GET_MODE_MASK (mode
);
3976 if (val_signbit_known_set_p (mode
, arg0s
))
3977 arg0s
|= ~GET_MODE_MASK (mode
);
3980 if (val_signbit_known_set_p (mode
, arg1s
))
3981 arg1s
|= ~GET_MODE_MASK (mode
);
3989 /* Compute the value of the arithmetic. */
3994 val
= arg0s
+ arg1s
;
3998 val
= arg0s
- arg1s
;
4002 val
= arg0s
* arg1s
;
4007 || ((unsigned HOST_WIDE_INT
) arg0s
4008 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4011 val
= arg0s
/ arg1s
;
4016 || ((unsigned HOST_WIDE_INT
) arg0s
4017 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4020 val
= arg0s
% arg1s
;
4025 || ((unsigned HOST_WIDE_INT
) arg0s
4026 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4029 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
4034 || ((unsigned HOST_WIDE_INT
) arg0s
4035 == (unsigned HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
- 1)
4038 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
4056 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
4057 the value is in range. We can't return any old value for
4058 out-of-range arguments because either the middle-end (via
4059 shift_truncation_mask) or the back-end might be relying on
4060 target-specific knowledge. Nor can we rely on
4061 shift_truncation_mask, since the shift might not be part of an
4062 ashlM3, lshrM3 or ashrM3 instruction. */
4063 if (SHIFT_COUNT_TRUNCATED
)
4064 arg1
= (unsigned HOST_WIDE_INT
) arg1
% width
;
4065 else if (arg1
< 0 || arg1
>= GET_MODE_BITSIZE (mode
))
4068 val
= (code
== ASHIFT
4069 ? ((unsigned HOST_WIDE_INT
) arg0
) << arg1
4070 : ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
);
4072 /* Sign-extend the result for arithmetic right shifts. */
4073 if (code
== ASHIFTRT
&& arg0s
< 0 && arg1
> 0)
4074 val
|= ((unsigned HOST_WIDE_INT
) (-1)) << (width
- arg1
);
4082 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
4083 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
4091 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
4092 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
4096 /* Do nothing here. */
4100 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
4104 val
= ((unsigned HOST_WIDE_INT
) arg0
4105 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
4109 val
= arg0s
> arg1s
? arg0s
: arg1s
;
4113 val
= ((unsigned HOST_WIDE_INT
) arg0
4114 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
4127 /* ??? There are simplifications that can be done. */
4134 return gen_int_mode (val
, mode
);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
4173 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
4176 struct simplify_plus_minus_op_data ops
[8];
4178 int n_ops
= 2, input_ops
= 2;
4179 int changed
, n_constants
= 0, canonicalized
= 0;
4182 memset (ops
, 0, sizeof ops
);
4184 /* Set up the two operands and then expand them until nothing has been
4185 changed. If we run out of room in our array, give up; this should
4186 almost never happen. */
  ops[1].neg = (code == MINUS);

  for (i = 0; i < n_ops; i++)
    {
      rtx this_op = ops[i].op;
      int this_neg = ops[i].neg;
      enum rtx_code this_code = GET_CODE (this_op);

      switch (this_code)
	{
	case PLUS:
	case MINUS:
	  ops[n_ops].op = XEXP (this_op, 1);
	  ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	  n_ops++;
	  ops[i].op = XEXP (this_op, 0);
	  canonicalized |= this_neg;
	  break;

	case NEG:
	  ops[i].op = XEXP (this_op, 0);
	  ops[i].neg = ! this_neg;
	  break;

	case CONST:
	  if (GET_CODE (XEXP (this_op, 0)) == PLUS
	      && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
	      && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
	    {
	      ops[i].op = XEXP (XEXP (this_op, 0), 0);
	      ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
	      ops[n_ops].neg = this_neg;
	      n_ops++;
	    }
	  break;

	case NOT:
	  /* ~a -> (-a - 1) */
	  ops[n_ops].op = CONSTM1_RTX (mode);
	  ops[n_ops++].neg = this_neg;
	  ops[i].op = XEXP (this_op, 0);
	  ops[i].neg = !this_neg;
	  break;

	case CONST_INT:
	  n_constants++;
	  if (this_neg)
	    ops[i].op = neg_const_int (mode, this_op);
	  break;

	default:
	  break;
	}
    }

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */

  /* Insertion sort is good enough for an eight-element array.  */
  for (i = 1; i < n_ops; i++)
    {
      struct simplify_plus_minus_op_data save;

      j = i - 1;
      if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
	continue;

      save = ops[i];
      do
	ops[j + 1] = ops[j];
      while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
      ops[j + 1] = save;
    }

  for (i = n_ops - 1; i > 0; i--)
    for (j = i - 1; j >= 0; j--)
      {
	rtx lhs = ops[j].op, rhs = ops[i].op;
	int lneg = ops[j].neg, rneg = ops[i].neg;

	if (lhs != 0 && rhs != 0)
	  {
	    enum rtx_code ncode = PLUS;

	    if (lneg != rneg)
	      {
		ncode = MINUS;
		if (lneg)
		  tem = lhs, lhs = rhs, rhs = tem;
	      }
	    else if (swap_commutative_operands_p (lhs, rhs))
	      tem = lhs, lhs = rhs, rhs = tem;

	    if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		&& (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
	      {
		rtx tem_lhs, tem_rhs;

		tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		tem = simplify_binary_operation (ncode, mode,
						 tem_lhs, tem_rhs);

		if (tem && !CONSTANT_P (tem))
		  tem = gen_rtx_CONST (GET_MODE (tem), tem);
	      }
	    else
	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);

	    /* Reject "simplifications" that just wrap the two
	       arguments in a CONST.  Failure to do so can result
	       in infinite recursion with simplify_binary_operation
	       when it calls us to simplify CONST operations.  */
	    if (tem
		&& ! (GET_CODE (tem) == CONST
		      && GET_CODE (XEXP (tem, 0)) == ncode
		      && XEXP (XEXP (tem, 0), 0) == lhs
		      && XEXP (XEXP (tem, 0), 1) == rhs))
	      {
		if (GET_CODE (tem) == NEG)
		  tem = XEXP (tem, 0), lneg = !lneg;
		if (CONST_INT_P (tem) && lneg)
		  tem = neg_const_int (mode, tem), lneg = 0;

		ops[i].op = tem;
		ops[i].neg = lneg;
		ops[j].op = NULL_RTX;
	      }
	  }
      }

  /* If nothing changed, fail.  */
  if (!canonicalized)
    return NULL_RTX;

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      {
	ops[i] = ops[j];
	i++;
      }
  n_ops = i;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */
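  /* For instance, if after combination the array holds
     { {x, 0}, {(symbol_ref s), 0}, {(const_int 4), 0} }, the code below
     folds the last two entries into (const (plus (symbol_ref s)
     (const_int 4))), so that the final result is (plus x (const ...)).  */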
  if (CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}

/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands must
   not both be VOIDmode as well.

   CMP_MODE is the mode in which the comparison is done, so it is the mode
   of the operands.  If CMP_MODE is VOIDmode, it is taken from the operands
   or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
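/* For example (hypothetical operands): with CODE == LT and both MODE and
   CMP_MODE == SImode, comparing (const_int 4) with (const_int 5) folds to
   const_true_rtx, whereas a comparison of two unrelated pseudo registers
   cannot be decided and the function returns 0.  */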
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE is the mode in which
   the comparison is done, so it is the mode of the operands.  */
static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
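  /* Concretely, (ltu:SI (plus:SI x (const_int 4)) (const_int 4)) tests
     whether x + 4 wrapped around, which is equivalent to
     (geu:SI x (const_int -4)); this is the usual unsigned overflow check.  */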
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */
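/* For instance, if the operands are known to satisfy 2 < 3, KNOWN_RESULT is
   CMP_LT | CMP_LTU; comparison_result (LE, CMP_LT | CMP_LTU) then yields
   const_true_rtx, while comparison_result (GTU, CMP_LT | CMP_LTU) yields
   const0_rtx.  */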
static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;

    default:
      gcc_unreachable ();
    }
}

/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */
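  /* For example (hypothetical operands), to decide
     (eq (plus x (const_int 1)) (plus x (const_int 2))) we simplify the
     difference of the two sides to (const_int -1) and then compare that
     constant against zero, which shows the equality can never hold.  */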
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
				 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }

  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
      && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (CONST_DOUBLE_AS_INT_P (trueop0))
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (CONST_DOUBLE_AS_INT_P (trueop1))
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
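      /* E.g. in QImode an all-ones operand gives l0u == 0xff after masking,
	 while sign extension makes l0s == -1, so the unsigned comparison
	 sees 255 where the signed comparison sees -1.  */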
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= GET_MODE_MASK (mode);
	  l1u &= GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l0s))
	    l0s |= ~GET_MODE_MASK (mode);

	  if (val_signbit_known_set_p (mode, l1s))
	    l1s |= ~GET_MODE_MASK (mode);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr;
	  cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
	  cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }

  /* Optimize comparisons with upper and lower bounds.  */
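  /* For instance, the signed bounds of QImode are [-128, 127], so comparing
     any QImode value against (const_int -128) with code GE is a tautology
     and folds to const_true_rtx without knowing anything about the value.  */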
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (sign)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }

  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
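      /* E.g. (ne (symbol_ref "foo") (const_int 0)) folds to const_true_rtx:
	 the address of a declared object is assumed to be nonzero even
	 though its sign is unknown.  */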
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GE:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }

  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    switch (code)
      {
      case LT:
	/* Optimize abs(x) < 0.0.  */
	if (!HONOR_SNANS (mode)
	    && (!INTEGRAL_MODE_P (mode)
		|| (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	  {
	    if (INTEGRAL_MODE_P (mode)
		&& (issue_strict_overflow_warning
		    (WARN_STRICT_OVERFLOW_CONDITIONAL)))
	      warning (OPT_Wstrict_overflow,
		       ("assuming signed overflow does not occur when "
			"assuming abs (x) < 0 is false"));
	    return const0_rtx;
	  }
	break;

      case GE:
	/* Optimize abs(x) >= 0.0.  */
	if (!HONOR_NANS (mode)
	    && (!INTEGRAL_MODE_P (mode)
		|| (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	  {
	    if (INTEGRAL_MODE_P (mode)
		&& (issue_strict_overflow_warning
		    (WARN_STRICT_OVERFLOW_CONDITIONAL)))
	      warning (OPT_Wstrict_overflow,
		       ("assuming signed overflow does not occur when "
			"assuming abs (x) >= 0 is true"));
	    return const_true_rtx;
	  }
	break;

      case UNGE:
	/* Optimize ! (abs(x) < 0.0).  */
	return const_true_rtx;

      default:
	break;
      }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1))))
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (CONST_INT_P (temp))
	    return temp == const0_rtx ? op2 : op1;

	  if (temp)
	    return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}

/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
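/* For instance (assuming a little-endian target), evaluating
   (subreg:QI (const_int 0x12345678) 0) with INNERMODE == SImode unpacks the
   constant into the byte array { 0x78, 0x56, 0x34, 0x12 }, selects byte 0,
   and repacks it as (const_int 0x78).  */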
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  unsigned char *vp;
  int value_start;
  int i;
  int elem;
  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
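  /* E.g. with a 4-byte INNERMODE, a 1-byte OUTERMODE and BYTE == 3 on a
     big-endian target, BYTE names the least-significant byte and is
     renumbered to 0 below; on a little-endian target BYTE already counts
     from the least-significant end and is left alone.  */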
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
		    << (i - HOST_BITS_PER_WIDE_INT);

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
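  /* E.g. on a 32-bit little-endian target where hard registers 0 and 1
     together hold a DImode value, (subreg:SI (reg:DI 0) 4) can be rewritten
     as the plain hard register (reg:SI 1), provided SImode is valid for
     that register (hypothetical register numbering).  */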
  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }

  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}