gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
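/* Illustrative example (editorial): with a 64-bit HOST_WIDE_INT, a low word
   of 0xffffffff80000000 is negative when viewed as signed, so
   HWI_SIGN_EXTEND yields (HOST_WIDE_INT) -1 for the high word; a low word
   of 0x1 yields 0.  */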
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
64 rtx, rtx, rtx, rtx);
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
68 static rtx
69 neg_const_int (enum machine_mode mode, rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
77 bool
78 mode_signbit_p (enum machine_mode mode, rtx x)
80 unsigned HOST_WIDE_INT val;
81 unsigned int width;
83 if (GET_MODE_CLASS (mode) != MODE_INT)
84 return false;
86 width = GET_MODE_BITSIZE (mode);
87 if (width == 0)
88 return false;
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
92 val = INTVAL (x);
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
100 else
101 return false;
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
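/* Illustrative example (editorial): for SImode on a host with 64-bit
   HOST_WIDE_INT, this accepts (const_int -2147483648), the canonical
   CONST_INT form of 0x80000000, and rejects every other SImode value.  */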
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 rtx op1)
115 rtx tem;
117 /* Put complex operands first and constants second if commutative. */
118 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
119 && swap_commutative_operands_p (op0, op1))
120 tem = op0, op0 = op1, op1 = tem;
122 /* If this simplifies, do it. */
123 tem = simplify_binary_operation (code, mode, op0, op1);
124 if (tem)
125 return tem;
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
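/* Illustrative example (editorial): simplify_gen_binary (PLUS, SImode,
   GEN_INT (2), reg) first swaps the operands so the constant comes second;
   no further folding applies, so it builds (plus:SI reg (const_int 2)).
   Here "reg" stands for an arbitrary (reg:SI ...).  */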
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x)
135 rtx c, tmp, addr;
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
141 case MEM:
142 break;
144 case FLOAT_EXTEND:
145 /* Handle float extensions of constant pool references. */
146 tmp = XEXP (x, 0);
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 REAL_VALUE_TYPE d;
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 return x;
157 default:
158 return x;
161 addr = XEXP (x, 0);
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
193 return tem;
195 else
196 return c;
199 return x;
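/* Illustrative example (editorial): a (mem:DF (symbol_ref ...)) whose
   symbol_ref is a constant-pool address holding 1.0 is replaced by the
   pooled (const_double ... 1.0); a MEM that does not reference the
   constant pool is returned unchanged.  */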
202 /* Return true if X is a MEM referencing the constant pool. */
204 bool
205 constant_pool_reference_p (rtx x)
207 return avoid_constant_pool_reference (x) != x;
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
217 rtx tem;
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
221 return tem;
223 return gen_rtx_fmt_e (code, mode, op);
226 /* Likewise for ternary operations. */
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232 rtx tem;
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
236 op0, op1, op2)))
237 return tem;
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
249 rtx tem;
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
252 op0, op1)))
253 return tem;
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
267 rtx op0, op1, op2;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
273 if (x == old_rtx)
274 return new_rtx;
276 switch (GET_RTX_CLASS (code))
278 case RTX_UNARY:
279 op0 = XEXP (x, 0);
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
283 return x;
284 return simplify_gen_unary (code, mode, op0, op_mode);
286 case RTX_BIN_ARITH:
287 case RTX_COMM_ARITH:
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
291 return x;
292 return simplify_gen_binary (code, mode, op0, op1);
294 case RTX_COMPARE:
295 case RTX_COMM_COMPARE:
296 op0 = XEXP (x, 0);
297 op1 = XEXP (x, 1);
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305 case RTX_TERNARY:
306 case RTX_BITFIELD_OPS:
307 op0 = XEXP (x, 0);
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
313 return x;
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318 case RTX_EXTRA:
319 /* The only case we try to handle is a SUBREG. */
320 if (code == SUBREG)
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
324 return x;
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
327 SUBREG_BYTE (x));
328 return op0 ? op0 : x;
330 break;
332 case RTX_OBJ:
333 if (code == MEM)
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
337 return x;
338 return replace_equiv_address_nv (x, op0);
340 else if (code == LO_SUM)
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
347 return op1;
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
350 return x;
351 return gen_rtx_LO_SUM (mode, op0, op1);
353 else if (code == REG)
355 if (rtx_equal_p (x, old_rtx))
356 return new_rtx;
358 break;
360 default:
361 break;
363 return x;
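/* Illustrative example (editorial): replacing a hypothetical (reg:SI 60)
   with (const_int 8) in (plus:SI (reg:SI 60) (const_int 4)) substitutes
   the operand and lets simplify_gen_binary fold the result to
   (const_int 12).  */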
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
373 rtx trueop, tem;
375 if (GET_CODE (op) == CONST)
376 op = XEXP (op, 0);
378 trueop = avoid_constant_pool_reference (op);
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
381 if (tem)
382 return tem;
384 return simplify_unary_operation_1 (code, mode, op);
387 /* Perform some simplifications we can do even if the operands
388 aren't constant. */
389 static rtx
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 enum rtx_code reversed;
393 rtx temp;
395 switch (code)
397 case NOT:
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
400 return XEXP (op, 0);
402 /* (not (eq X Y)) == (ne X Y), etc. */
403 if (COMPARISON_P (op)
404 && (mode == BImode || STORE_FLAG_VALUE == -1)
405 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
406 return simplify_gen_relational (reversed, mode, VOIDmode,
407 XEXP (op, 0), XEXP (op, 1));
409 /* (not (plus X -1)) can become (neg X). */
410 if (GET_CODE (op) == PLUS
411 && XEXP (op, 1) == constm1_rtx)
412 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
414 /* Similarly, (not (neg X)) is (plus X -1). */
415 if (GET_CODE (op) == NEG)
416 return plus_constant (XEXP (op, 0), -1);
418 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
419 if (GET_CODE (op) == XOR
420 && GET_CODE (XEXP (op, 1)) == CONST_INT
421 && (temp = simplify_unary_operation (NOT, mode,
422 XEXP (op, 1), mode)) != 0)
423 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
425 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
426 if (GET_CODE (op) == PLUS
427 && GET_CODE (XEXP (op, 1)) == CONST_INT
428 && mode_signbit_p (mode, XEXP (op, 1))
429 && (temp = simplify_unary_operation (NOT, mode,
430 XEXP (op, 1), mode)) != 0)
431 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
434 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
435 operands other than 1, but that is not valid. We could do a
436 similar simplification for (not (lshiftrt C X)) where C is
437 just the sign bit, but this doesn't seem common enough to
438 bother with. */
439 if (GET_CODE (op) == ASHIFT
440 && XEXP (op, 0) == const1_rtx)
442 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
443 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
446 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
447 by reversing the comparison code if valid. */
448 if (STORE_FLAG_VALUE == -1
449 && COMPARISON_P (op)
450 && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
451 return simplify_gen_relational (reversed, mode, VOIDmode,
452 XEXP (op, 0), XEXP (op, 1));
454 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
455 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
456 so we can perform the above simplification. */
458 if (STORE_FLAG_VALUE == -1
459 && GET_CODE (op) == ASHIFTRT
460 && GET_CODE (XEXP (op, 1)) == CONST_INT
461 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
462 return simplify_gen_relational (GE, mode, VOIDmode,
463 XEXP (op, 0), const0_rtx);
465 break;
467 case NEG:
468 /* (neg (neg X)) == X. */
469 if (GET_CODE (op) == NEG)
470 return XEXP (op, 0);
472 /* (neg (plus X 1)) can become (not X). */
473 if (GET_CODE (op) == PLUS
474 && XEXP (op, 1) == const1_rtx)
475 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
477 /* Similarly, (neg (not X)) is (plus X 1). */
478 if (GET_CODE (op) == NOT)
479 return plus_constant (XEXP (op, 0), 1);
481 /* (neg (minus X Y)) can become (minus Y X). This transformation
482 isn't safe for modes with signed zeros, since if X and Y are
483 both +0, (minus Y X) is the same as (minus X Y). If the
484 rounding mode is towards +infinity (or -infinity) then the two
485 expressions will be rounded differently. */
486 if (GET_CODE (op) == MINUS
487 && !HONOR_SIGNED_ZEROS (mode)
488 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
489 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
491 if (GET_CODE (op) == PLUS
492 && !HONOR_SIGNED_ZEROS (mode)
493 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
495 /* (neg (plus A C)) is simplified to (minus -C A). */
496 if (GET_CODE (XEXP (op, 1)) == CONST_INT
497 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
499 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
500 if (temp)
501 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
504 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
505 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
506 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
509 /* (neg (mult A B)) becomes (mult (neg A) B).
510 This works even for floating-point values. */
511 if (GET_CODE (op) == MULT
512 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
514 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
515 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
518 /* NEG commutes with ASHIFT since it is multiplication. Only do
519 this if we can then eliminate the NEG (e.g., if the operand
520 is a constant). */
521 if (GET_CODE (op) == ASHIFT)
523 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
524 if (temp)
525 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
528 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
529 C is equal to the width of MODE minus 1. */
530 if (GET_CODE (op) == ASHIFTRT
531 && GET_CODE (XEXP (op, 1)) == CONST_INT
532 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
533 return simplify_gen_binary (LSHIFTRT, mode,
534 XEXP (op, 0), XEXP (op, 1));
536 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
537 C is equal to the width of MODE minus 1. */
538 if (GET_CODE (op) == LSHIFTRT
539 && GET_CODE (XEXP (op, 1)) == CONST_INT
540 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
541 return simplify_gen_binary (ASHIFTRT, mode,
542 XEXP (op, 0), XEXP (op, 1));
544 break;
546 case SIGN_EXTEND:
547 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
548 becomes just the MINUS if its mode is MODE. This allows
549 folding switch statements on machines using casesi (such as
550 the VAX). */
551 if (GET_CODE (op) == TRUNCATE
552 && GET_MODE (XEXP (op, 0)) == mode
553 && GET_CODE (XEXP (op, 0)) == MINUS
554 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
555 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
556 return XEXP (op, 0);
558 /* Check for a sign extension of a subreg of a promoted
559 variable, where the promotion is sign-extended, and the
560 target mode is the same as the variable's promotion. */
561 if (GET_CODE (op) == SUBREG
562 && SUBREG_PROMOTED_VAR_P (op)
563 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
564 && GET_MODE (XEXP (op, 0)) == mode)
565 return XEXP (op, 0);
567 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
568 if (! POINTERS_EXTEND_UNSIGNED
569 && mode == Pmode && GET_MODE (op) == ptr_mode
570 && (CONSTANT_P (op)
571 || (GET_CODE (op) == SUBREG
572 && REG_P (SUBREG_REG (op))
573 && REG_POINTER (SUBREG_REG (op))
574 && GET_MODE (SUBREG_REG (op)) == Pmode)))
575 return convert_memory_address (Pmode, op);
576 #endif
577 break;
579 case ZERO_EXTEND:
580 /* Check for a zero extension of a subreg of a promoted
581 variable, where the promotion is zero-extended, and the
582 target mode is the same as the variable's promotion. */
583 if (GET_CODE (op) == SUBREG
584 && SUBREG_PROMOTED_VAR_P (op)
585 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
586 && GET_MODE (XEXP (op, 0)) == mode)
587 return XEXP (op, 0);
589 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
590 if (POINTERS_EXTEND_UNSIGNED > 0
591 && mode == Pmode && GET_MODE (op) == ptr_mode
592 && (CONSTANT_P (op)
593 || (GET_CODE (op) == SUBREG
594 && REG_P (SUBREG_REG (op))
595 && REG_POINTER (SUBREG_REG (op))
596 && GET_MODE (SUBREG_REG (op)) == Pmode)))
597 return convert_memory_address (Pmode, op);
598 #endif
599 break;
601 default:
602 break;
605 return 0;
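/* Illustrative example (editorial): simplify_unary_operation_1 (NEG, SImode,
   (neg:SI (reg:SI 60))) matches the "(neg (neg X)) == X" rule above and
   returns the inner (reg:SI 60).  */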
608 /* Try to compute the value of a unary operation CODE whose output mode is to
609 be MODE with input operand OP whose mode was originally OP_MODE.
610 Return zero if the value cannot be computed. */
612 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
613 rtx op, enum machine_mode op_mode)
615 unsigned int width = GET_MODE_BITSIZE (mode);
617 if (code == VEC_DUPLICATE)
619 gcc_assert (VECTOR_MODE_P (mode));
620 if (GET_MODE (op) != VOIDmode)
622 if (!VECTOR_MODE_P (GET_MODE (op)))
623 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
624 else
625 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
626 (GET_MODE (op)));
628 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
629 || GET_CODE (op) == CONST_VECTOR)
631 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
632 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
633 rtvec v = rtvec_alloc (n_elts);
634 unsigned int i;
636 if (GET_CODE (op) != CONST_VECTOR)
637 for (i = 0; i < n_elts; i++)
638 RTVEC_ELT (v, i) = op;
639 else
641 enum machine_mode inmode = GET_MODE (op);
642 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
643 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
645 gcc_assert (in_n_elts < n_elts);
646 gcc_assert ((n_elts % in_n_elts) == 0);
647 for (i = 0; i < n_elts; i++)
648 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
650 return gen_rtx_CONST_VECTOR (mode, v);
654 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
656 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
657 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
658 enum machine_mode opmode = GET_MODE (op);
659 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
660 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
661 rtvec v = rtvec_alloc (n_elts);
662 unsigned int i;
664 gcc_assert (op_n_elts == n_elts);
665 for (i = 0; i < n_elts; i++)
667 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
668 CONST_VECTOR_ELT (op, i),
669 GET_MODE_INNER (opmode));
670 if (!x)
671 return 0;
672 RTVEC_ELT (v, i) = x;
674 return gen_rtx_CONST_VECTOR (mode, v);
677 /* The order of these tests is critical so that, for example, we don't
678 check the wrong mode (input vs. output) for a conversion operation,
679 such as FIX. At some point, this should be simplified. */
681 if (code == FLOAT && GET_MODE (op) == VOIDmode
682 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
684 HOST_WIDE_INT hv, lv;
685 REAL_VALUE_TYPE d;
687 if (GET_CODE (op) == CONST_INT)
688 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
689 else
690 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
692 REAL_VALUE_FROM_INT (d, lv, hv, mode);
693 d = real_value_truncate (mode, d);
694 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
696 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
697 && (GET_CODE (op) == CONST_DOUBLE
698 || GET_CODE (op) == CONST_INT))
700 HOST_WIDE_INT hv, lv;
701 REAL_VALUE_TYPE d;
703 if (GET_CODE (op) == CONST_INT)
704 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
705 else
706 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
708 if (op_mode == VOIDmode)
710 /* We don't know how to interpret negative-looking numbers in
711 this case, so don't try to fold those. */
712 if (hv < 0)
713 return 0;
715 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
717 else
718 hv = 0, lv &= GET_MODE_MASK (op_mode);
720 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
721 d = real_value_truncate (mode, d);
722 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
725 if (GET_CODE (op) == CONST_INT
726 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
728 HOST_WIDE_INT arg0 = INTVAL (op);
729 HOST_WIDE_INT val;
731 switch (code)
733 case NOT:
734 val = ~ arg0;
735 break;
737 case NEG:
738 val = - arg0;
739 break;
741 case ABS:
742 val = (arg0 >= 0 ? arg0 : - arg0);
743 break;
745 case FFS:
746 /* Don't use ffs here. Instead, get low order bit and then its
747 number. If arg0 is zero, this will return 0, as desired. */
748 arg0 &= GET_MODE_MASK (mode);
749 val = exact_log2 (arg0 & (- arg0)) + 1;
750 break;
752 case CLZ:
753 arg0 &= GET_MODE_MASK (mode);
754 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
756 else
757 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
758 break;
760 case CTZ:
761 arg0 &= GET_MODE_MASK (mode);
762 if (arg0 == 0)
764 /* Even if the value at zero is undefined, we have to come
765 up with some replacement. Seems good enough. */
766 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
767 val = GET_MODE_BITSIZE (mode);
769 else
770 val = exact_log2 (arg0 & -arg0);
771 break;
773 case POPCOUNT:
774 arg0 &= GET_MODE_MASK (mode);
775 val = 0;
776 while (arg0)
777 val++, arg0 &= arg0 - 1;
778 break;
780 case PARITY:
781 arg0 &= GET_MODE_MASK (mode);
782 val = 0;
783 while (arg0)
784 val++, arg0 &= arg0 - 1;
785 val &= 1;
786 break;
788 case TRUNCATE:
789 val = arg0;
790 break;
792 case ZERO_EXTEND:
793 /* When zero-extending a CONST_INT, we need to know its
794 original mode. */
795 gcc_assert (op_mode != VOIDmode);
796 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
798 /* If we were really extending the mode,
799 we would have to distinguish between zero-extension
800 and sign-extension. */
801 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
802 val = arg0;
804 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
805 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
806 else
807 return 0;
808 break;
810 case SIGN_EXTEND:
811 if (op_mode == VOIDmode)
812 op_mode = mode;
813 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
815 /* If we were really extending the mode,
816 we would have to distinguish between zero-extension
817 and sign-extension. */
818 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
819 val = arg0;
821 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
824 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
825 if (val
826 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
827 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
829 else
830 return 0;
831 break;
833 case SQRT:
834 case FLOAT_EXTEND:
835 case FLOAT_TRUNCATE:
836 case SS_TRUNCATE:
837 case US_TRUNCATE:
838 return 0;
840 default:
841 gcc_unreachable ();
844 return gen_int_mode (val, mode);
847 /* We can do some operations on integer CONST_DOUBLEs. Also allow
848 for a DImode operation on a CONST_INT. */
849 else if (GET_MODE (op) == VOIDmode
850 && width <= HOST_BITS_PER_WIDE_INT * 2
851 && (GET_CODE (op) == CONST_DOUBLE
852 || GET_CODE (op) == CONST_INT))
854 unsigned HOST_WIDE_INT l1, lv;
855 HOST_WIDE_INT h1, hv;
857 if (GET_CODE (op) == CONST_DOUBLE)
858 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
859 else
860 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
862 switch (code)
864 case NOT:
865 lv = ~ l1;
866 hv = ~ h1;
867 break;
869 case NEG:
870 neg_double (l1, h1, &lv, &hv);
871 break;
873 case ABS:
874 if (h1 < 0)
875 neg_double (l1, h1, &lv, &hv);
876 else
877 lv = l1, hv = h1;
878 break;
880 case FFS:
881 hv = 0;
882 if (l1 == 0)
884 if (h1 == 0)
885 lv = 0;
886 else
887 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
889 else
890 lv = exact_log2 (l1 & -l1) + 1;
891 break;
893 case CLZ:
894 hv = 0;
895 if (h1 != 0)
896 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
897 - HOST_BITS_PER_WIDE_INT;
898 else if (l1 != 0)
899 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
900 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
901 lv = GET_MODE_BITSIZE (mode);
902 break;
904 case CTZ:
905 hv = 0;
906 if (l1 != 0)
907 lv = exact_log2 (l1 & -l1);
908 else if (h1 != 0)
909 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
910 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
911 lv = GET_MODE_BITSIZE (mode);
912 break;
914 case POPCOUNT:
915 hv = 0;
916 lv = 0;
917 while (l1)
918 lv++, l1 &= l1 - 1;
919 while (h1)
920 lv++, h1 &= h1 - 1;
921 break;
923 case PARITY:
924 hv = 0;
925 lv = 0;
926 while (l1)
927 lv++, l1 &= l1 - 1;
928 while (h1)
929 lv++, h1 &= h1 - 1;
930 lv &= 1;
931 break;
933 case TRUNCATE:
934 /* This is just a change-of-mode, so do nothing. */
935 lv = l1, hv = h1;
936 break;
938 case ZERO_EXTEND:
939 gcc_assert (op_mode != VOIDmode);
941 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
942 return 0;
944 hv = 0;
945 lv = l1 & GET_MODE_MASK (op_mode);
946 break;
948 case SIGN_EXTEND:
949 if (op_mode == VOIDmode
950 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
951 return 0;
952 else
954 lv = l1 & GET_MODE_MASK (op_mode);
955 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
956 && (lv & ((HOST_WIDE_INT) 1
957 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
958 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
960 hv = HWI_SIGN_EXTEND (lv);
962 break;
964 case SQRT:
965 return 0;
967 default:
968 return 0;
971 return immed_double_const (lv, hv, mode);
974 else if (GET_CODE (op) == CONST_DOUBLE
975 && SCALAR_FLOAT_MODE_P (mode))
977 REAL_VALUE_TYPE d, t;
978 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
980 switch (code)
982 case SQRT:
983 if (HONOR_SNANS (mode) && real_isnan (&d))
984 return 0;
985 real_sqrt (&t, mode, &d);
986 d = t;
987 break;
988 case ABS:
989 d = REAL_VALUE_ABS (d);
990 break;
991 case NEG:
992 d = REAL_VALUE_NEGATE (d);
993 break;
994 case FLOAT_TRUNCATE:
995 d = real_value_truncate (mode, d);
996 break;
997 case FLOAT_EXTEND:
998 /* All this does is change the mode. */
999 break;
1000 case FIX:
1001 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1002 break;
1003 case NOT:
1005 long tmp[4];
1006 int i;
1008 real_to_target (tmp, &d, GET_MODE (op));
1009 for (i = 0; i < 4; i++)
1010 tmp[i] = ~tmp[i];
1011 real_from_target (&d, tmp, mode);
1012 break;
1014 default:
1015 gcc_unreachable ();
1017 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1020 else if (GET_CODE (op) == CONST_DOUBLE
1021 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1022 && GET_MODE_CLASS (mode) == MODE_INT
1023 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1025 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1026 operators are intentionally left unspecified (to ease implementation
1027 by target backends), for consistency, this routine implements the
1028 same semantics for constant folding as used by the middle-end. */
1030 /* This was formerly used only for non-IEEE float.
1031 eggert@twinsun.com says it is safe for IEEE also. */
1032 HOST_WIDE_INT xh, xl, th, tl;
1033 REAL_VALUE_TYPE x, t;
1034 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1035 switch (code)
1037 case FIX:
1038 if (REAL_VALUE_ISNAN (x))
1039 return const0_rtx;
1041 /* Test against the signed upper bound. */
1042 if (width > HOST_BITS_PER_WIDE_INT)
1044 th = ((unsigned HOST_WIDE_INT) 1
1045 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1046 tl = -1;
1048 else
1050 th = 0;
1051 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1053 real_from_integer (&t, VOIDmode, tl, th, 0);
1054 if (REAL_VALUES_LESS (t, x))
1056 xh = th;
1057 xl = tl;
1058 break;
1061 /* Test against the signed lower bound. */
1062 if (width > HOST_BITS_PER_WIDE_INT)
1064 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1065 tl = 0;
1067 else
1069 th = -1;
1070 tl = (HOST_WIDE_INT) -1 << (width - 1);
1072 real_from_integer (&t, VOIDmode, tl, th, 0);
1073 if (REAL_VALUES_LESS (x, t))
1075 xh = th;
1076 xl = tl;
1077 break;
1079 REAL_VALUE_TO_INT (&xl, &xh, x);
1080 break;
1082 case UNSIGNED_FIX:
1083 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1084 return const0_rtx;
1086 /* Test against the unsigned upper bound. */
1087 if (width == 2*HOST_BITS_PER_WIDE_INT)
1089 th = -1;
1090 tl = -1;
1092 else if (width >= HOST_BITS_PER_WIDE_INT)
1094 th = ((unsigned HOST_WIDE_INT) 1
1095 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1096 tl = -1;
1098 else
1100 th = 0;
1101 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1103 real_from_integer (&t, VOIDmode, tl, th, 1);
1104 if (REAL_VALUES_LESS (t, x))
1106 xh = th;
1107 xl = tl;
1108 break;
1111 REAL_VALUE_TO_INT (&xl, &xh, x);
1112 break;
1114 default:
1115 gcc_unreachable ();
1117 return immed_double_const (xl, xh, mode);
1120 return NULL_RTX;
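/* Illustrative example (editorial): simplify_const_unary_operation (NEG,
   SImode, GEN_INT (5), SImode) folds to (const_int -5); for a non-constant
   operand it returns NULL_RTX and the caller falls back to
   simplify_unary_operation_1.  */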
1123 /* Subroutine of simplify_binary_operation to simplify a commutative,
1124 associative binary operation CODE with result mode MODE, operating
1125 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1126 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1127 canonicalization is possible. */
1129 static rtx
1130 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1131 rtx op0, rtx op1)
1133 rtx tem;
1135 /* Linearize the operator to the left. */
1136 if (GET_CODE (op1) == code)
1138 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
1139 if (GET_CODE (op0) == code)
1141 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1142 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1145 /* "a op (b op c)" becomes "(b op c) op a". */
1146 if (! swap_commutative_operands_p (op1, op0))
1147 return simplify_gen_binary (code, mode, op1, op0);
1149 tem = op0;
1150 op0 = op1;
1151 op1 = tem;
1154 if (GET_CODE (op0) == code)
1156 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1157 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1159 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1160 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1163 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1164 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1165 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1166 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1167 if (tem != 0)
1168 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1170 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1171 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1172 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1173 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1174 if (tem != 0)
1175 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1178 return 0;
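/* Illustrative example (editorial): for (ior:SI (ior:SI (reg:SI 60)
   (const_int 1)) (const_int 2)), the "(a op b) op c" -> "a op (b op c)"
   step folds the two constants and yields (ior:SI (reg:SI 60)
   (const_int 3)).  */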
1182 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1183 and OP1. Return 0 if no simplification is possible.
1185 Don't use this for relational operations such as EQ or LT.
1186 Use simplify_relational_operation instead. */
1188 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1189 rtx op0, rtx op1)
1191 rtx trueop0, trueop1;
1192 rtx tem;
1194 /* Relational operations don't work here. We must know the mode
1195 of the operands in order to do the comparison correctly.
1196 Assuming a full word can give incorrect results.
1197 Consider comparing 128 with -128 in QImode. */
1198 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1199 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1201 /* Make sure the constant is second. */
1202 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1203 && swap_commutative_operands_p (op0, op1))
1205 tem = op0, op0 = op1, op1 = tem;
1208 trueop0 = avoid_constant_pool_reference (op0);
1209 trueop1 = avoid_constant_pool_reference (op1);
1211 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1212 if (tem)
1213 return tem;
1214 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1217 static rtx
1218 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1219 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1221 rtx tem;
1222 HOST_WIDE_INT val;
1223 unsigned int width = GET_MODE_BITSIZE (mode);
1225 /* Even if we can't compute a constant result,
1226 there are some cases worth simplifying. */
1228 switch (code)
1230 case PLUS:
1231 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1232 when x is NaN, infinite, or finite and nonzero. They aren't
1233 when x is -0 and the rounding mode is not towards -infinity,
1234 since (-0) + 0 is then 0. */
1235 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1236 return op0;
1238 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1239 transformations are safe even for IEEE. */
1240 if (GET_CODE (op0) == NEG)
1241 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1242 else if (GET_CODE (op1) == NEG)
1243 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1245 /* (~a) + 1 -> -a */
1246 if (INTEGRAL_MODE_P (mode)
1247 && GET_CODE (op0) == NOT
1248 && trueop1 == const1_rtx)
1249 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1251 /* Handle both-operands-constant cases. We can only add
1252 CONST_INTs to constants since the sum of relocatable symbols
1253 can't be handled by most assemblers. Don't add CONST_INT
1254 to CONST_INT since overflow won't be computed properly if wider
1255 than HOST_BITS_PER_WIDE_INT. */
1257 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1258 && GET_CODE (op1) == CONST_INT)
1259 return plus_constant (op0, INTVAL (op1));
1260 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1261 && GET_CODE (op0) == CONST_INT)
1262 return plus_constant (op1, INTVAL (op0));
1264 /* See if this is something like X * C - X or vice versa or
1265 if the multiplication is written as a shift. If so, we can
1266 distribute and make a new multiply, shift, or maybe just
1267 have X (if C is 2 in the example above). But don't make
1268 something more expensive than we had before. */
1270 if (SCALAR_INT_MODE_P (mode))
1272 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1273 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1274 rtx lhs = op0, rhs = op1;
1276 if (GET_CODE (lhs) == NEG)
1278 coeff0l = -1;
1279 coeff0h = -1;
1280 lhs = XEXP (lhs, 0);
1282 else if (GET_CODE (lhs) == MULT
1283 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1285 coeff0l = INTVAL (XEXP (lhs, 1));
1286 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1287 lhs = XEXP (lhs, 0);
1289 else if (GET_CODE (lhs) == ASHIFT
1290 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1291 && INTVAL (XEXP (lhs, 1)) >= 0
1292 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1294 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1295 coeff0h = 0;
1296 lhs = XEXP (lhs, 0);
1299 if (GET_CODE (rhs) == NEG)
1301 coeff1l = -1;
1302 coeff1h = -1;
1303 rhs = XEXP (rhs, 0);
1305 else if (GET_CODE (rhs) == MULT
1306 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1308 coeff1l = INTVAL (XEXP (rhs, 1));
1309 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1310 rhs = XEXP (rhs, 0);
1312 else if (GET_CODE (rhs) == ASHIFT
1313 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1314 && INTVAL (XEXP (rhs, 1)) >= 0
1315 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1317 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1318 coeff1h = 0;
1319 rhs = XEXP (rhs, 0);
1322 if (rtx_equal_p (lhs, rhs))
1324 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1325 rtx coeff;
1326 unsigned HOST_WIDE_INT l;
1327 HOST_WIDE_INT h;
1329 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1330 coeff = immed_double_const (l, h, mode);
1332 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1333 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1334 ? tem : 0;
1338 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1339 if ((GET_CODE (op1) == CONST_INT
1340 || GET_CODE (op1) == CONST_DOUBLE)
1341 && GET_CODE (op0) == XOR
1342 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1343 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1344 && mode_signbit_p (mode, op1))
1345 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1346 simplify_gen_binary (XOR, mode, op1,
1347 XEXP (op0, 1)));
1349 /* If one of the operands is a PLUS or a MINUS, see if we can
1350 simplify this by the associative law.
1351 Don't use the associative law for floating point.
1352 The inaccuracy makes it nonassociative,
1353 and subtle programs can break if operations are associated. */
1355 if (INTEGRAL_MODE_P (mode)
1356 && (plus_minus_operand_p (op0)
1357 || plus_minus_operand_p (op1))
1358 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1359 return tem;
1361 /* Reassociate floating point addition only when the user
1362 specifies unsafe math optimizations. */
1363 if (FLOAT_MODE_P (mode)
1364 && flag_unsafe_math_optimizations)
1366 tem = simplify_associative_operation (code, mode, op0, op1);
1367 if (tem)
1368 return tem;
1370 break;
1372 case COMPARE:
1373 #ifdef HAVE_cc0
1374 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1375 using cc0, in which case we want to leave it as a COMPARE
1376 so we can distinguish it from a register-register-copy.
1378 In IEEE floating point, x-0 is not the same as x. */
1380 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1381 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1382 && trueop1 == CONST0_RTX (mode))
1383 return op0;
1384 #endif
1386 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1387 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1388 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1389 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1391 rtx xop00 = XEXP (op0, 0);
1392 rtx xop10 = XEXP (op1, 0);
1394 #ifdef HAVE_cc0
1395 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1396 #else
1397 if (REG_P (xop00) && REG_P (xop10)
1398 && GET_MODE (xop00) == GET_MODE (xop10)
1399 && REGNO (xop00) == REGNO (xop10)
1400 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1401 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1402 #endif
1403 return xop00;
1405 break;
1407 case MINUS:
1408 /* We can't assume x-x is 0 even with non-IEEE floating point,
1409 but since it is zero except in very strange circumstances, we
1410 will treat it as zero with -funsafe-math-optimizations. */
1411 if (rtx_equal_p (trueop0, trueop1)
1412 && ! side_effects_p (op0)
1413 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1414 return CONST0_RTX (mode);
1416 /* Change subtraction from zero into negation. (0 - x) is the
1417 same as -x when x is NaN, infinite, or finite and nonzero.
1418 But if the mode has signed zeros, and does not round towards
1419 -infinity, then 0 - 0 is 0, not -0. */
1420 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1421 return simplify_gen_unary (NEG, mode, op1, mode);
1423 /* (-1 - a) is ~a. */
1424 if (trueop0 == constm1_rtx)
1425 return simplify_gen_unary (NOT, mode, op1, mode);
1427 /* Subtracting 0 has no effect unless the mode has signed zeros
1428 and supports rounding towards -infinity. In such a case,
1429 0 - 0 is -0. */
1430 if (!(HONOR_SIGNED_ZEROS (mode)
1431 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1432 && trueop1 == CONST0_RTX (mode))
1433 return op0;
1435 /* See if this is something like X * C - X or vice versa or
1436 if the multiplication is written as a shift. If so, we can
1437 distribute and make a new multiply, shift, or maybe just
1438 have X (if C is 2 in the example above). But don't make
1439 something more expensive than we had before. */
1441 if (SCALAR_INT_MODE_P (mode))
1443 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1444 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1445 rtx lhs = op0, rhs = op1;
1447 if (GET_CODE (lhs) == NEG)
1449 coeff0l = -1;
1450 coeff0h = -1;
1451 lhs = XEXP (lhs, 0);
1453 else if (GET_CODE (lhs) == MULT
1454 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1456 coeff0l = INTVAL (XEXP (lhs, 1));
1457 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1458 lhs = XEXP (lhs, 0);
1460 else if (GET_CODE (lhs) == ASHIFT
1461 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1462 && INTVAL (XEXP (lhs, 1)) >= 0
1463 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1465 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1466 coeff0h = 0;
1467 lhs = XEXP (lhs, 0);
1470 if (GET_CODE (rhs) == NEG)
1472 negcoeff1l = 1;
1473 negcoeff1h = 0;
1474 rhs = XEXP (rhs, 0);
1476 else if (GET_CODE (rhs) == MULT
1477 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1479 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1480 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1481 rhs = XEXP (rhs, 0);
1483 else if (GET_CODE (rhs) == ASHIFT
1484 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1485 && INTVAL (XEXP (rhs, 1)) >= 0
1486 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1488 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1489 negcoeff1h = -1;
1490 rhs = XEXP (rhs, 0);
1493 if (rtx_equal_p (lhs, rhs))
1495 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1496 rtx coeff;
1497 unsigned HOST_WIDE_INT l;
1498 HOST_WIDE_INT h;
1500 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1501 coeff = immed_double_const (l, h, mode);
1503 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1504 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1505 ? tem : 0;
1509 /* (a - (-b)) -> (a + b). True even for IEEE. */
1510 if (GET_CODE (op1) == NEG)
1511 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1513 /* (-x - c) may be simplified as (-c - x). */
1514 if (GET_CODE (op0) == NEG
1515 && (GET_CODE (op1) == CONST_INT
1516 || GET_CODE (op1) == CONST_DOUBLE))
1518 tem = simplify_unary_operation (NEG, mode, op1, mode);
1519 if (tem)
1520 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1523 /* Don't let a relocatable value get a negative coeff. */
1524 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1525 return simplify_gen_binary (PLUS, mode,
1526 op0,
1527 neg_const_int (mode, op1));
1529 /* (x - (x & y)) -> (x & ~y) */
1530 if (GET_CODE (op1) == AND)
1532 if (rtx_equal_p (op0, XEXP (op1, 0)))
1534 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1535 GET_MODE (XEXP (op1, 1)));
1536 return simplify_gen_binary (AND, mode, op0, tem);
1538 if (rtx_equal_p (op0, XEXP (op1, 1)))
1540 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1541 GET_MODE (XEXP (op1, 0)));
1542 return simplify_gen_binary (AND, mode, op0, tem);
1546 /* If one of the operands is a PLUS or a MINUS, see if we can
1547 simplify this by the associative law. This will, for example,
1548 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1549 Don't use the associative law for floating point.
1550 The inaccuracy makes it nonassociative,
1551 and subtle programs can break if operations are associated. */
1553 if (INTEGRAL_MODE_P (mode)
1554 && (plus_minus_operand_p (op0)
1555 || plus_minus_operand_p (op1))
1556 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1557 return tem;
1558 break;
1560 case MULT:
1561 if (trueop1 == constm1_rtx)
1562 return simplify_gen_unary (NEG, mode, op0, mode);
1564 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1565 x is NaN, since x * 0 is then also NaN. Nor is it valid
1566 when the mode has signed zeros, since multiplying a negative
1567 number by 0 will give -0, not 0. */
1568 if (!HONOR_NANS (mode)
1569 && !HONOR_SIGNED_ZEROS (mode)
1570 && trueop1 == CONST0_RTX (mode)
1571 && ! side_effects_p (op0))
1572 return op1;
1574 /* In IEEE floating point, x*1 is not equivalent to x for
1575 signalling NaNs. */
1576 if (!HONOR_SNANS (mode)
1577 && trueop1 == CONST1_RTX (mode))
1578 return op0;
1580 /* Convert multiply by constant power of two into shift unless
1581 we are still generating RTL. This test is a kludge. */
1582 if (GET_CODE (trueop1) == CONST_INT
1583 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1584 /* If the mode is larger than the host word size, and the
1585 uppermost bit is set, then this isn't a power of two due
1586 to implicit sign extension. */
1587 && (width <= HOST_BITS_PER_WIDE_INT
1588 || val != HOST_BITS_PER_WIDE_INT - 1))
1589 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1591 /* Likewise for multipliers wider than a word. */
1592 else if (GET_CODE (trueop1) == CONST_DOUBLE
1593 && (GET_MODE (trueop1) == VOIDmode
1594 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1595 && GET_MODE (op0) == mode
1596 && CONST_DOUBLE_LOW (trueop1) == 0
1597 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1598 return simplify_gen_binary (ASHIFT, mode, op0,
1599 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1601 /* x*2 is x+x and x*(-1) is -x */
1602 if (GET_CODE (trueop1) == CONST_DOUBLE
1603 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1604 && GET_MODE (op0) == mode)
1606 REAL_VALUE_TYPE d;
1607 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1609 if (REAL_VALUES_EQUAL (d, dconst2))
1610 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1612 if (REAL_VALUES_EQUAL (d, dconstm1))
1613 return simplify_gen_unary (NEG, mode, op0, mode);
1616 /* Reassociate multiplication, but for floating point MULTs
1617 only when the user specifies unsafe math optimizations. */
1618 if (! FLOAT_MODE_P (mode)
1619 || flag_unsafe_math_optimizations)
1621 tem = simplify_associative_operation (code, mode, op0, op1);
1622 if (tem)
1623 return tem;
1625 break;
1627 case IOR:
1628 if (trueop1 == const0_rtx)
1629 return op0;
1630 if (GET_CODE (trueop1) == CONST_INT
1631 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1632 == GET_MODE_MASK (mode)))
1633 return op1;
1634 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1635 return op0;
1636 /* A | (~A) -> -1 */
1637 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1638 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1639 && ! side_effects_p (op0)
1640 && SCALAR_INT_MODE_P (mode))
1641 return constm1_rtx;
1642 tem = simplify_associative_operation (code, mode, op0, op1);
1643 if (tem)
1644 return tem;
1645 break;
1647 case XOR:
1648 if (trueop1 == const0_rtx)
1649 return op0;
1650 if (GET_CODE (trueop1) == CONST_INT
1651 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1652 == GET_MODE_MASK (mode)))
1653 return simplify_gen_unary (NOT, mode, op0, mode);
1654 if (rtx_equal_p (trueop0, trueop1)
1655 && ! side_effects_p (op0)
1656 && GET_MODE_CLASS (mode) != MODE_CC)
1657 return CONST0_RTX (mode);
1659 /* Canonicalize XOR of the most significant bit to PLUS. */
1660 if ((GET_CODE (op1) == CONST_INT
1661 || GET_CODE (op1) == CONST_DOUBLE)
1662 && mode_signbit_p (mode, op1))
1663 return simplify_gen_binary (PLUS, mode, op0, op1);
1664 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1665 if ((GET_CODE (op1) == CONST_INT
1666 || GET_CODE (op1) == CONST_DOUBLE)
1667 && GET_CODE (op0) == PLUS
1668 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1669 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1670 && mode_signbit_p (mode, XEXP (op0, 1)))
1671 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1672 simplify_gen_binary (XOR, mode, op1,
1673 XEXP (op0, 1)));
1675 tem = simplify_associative_operation (code, mode, op0, op1);
1676 if (tem)
1677 return tem;
1678 break;
1680 case AND:
1681 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1682 return trueop1;
1683 /* If we are turning off bits already known off in OP0, we need
1684 not do an AND. */
1685 if (GET_CODE (trueop1) == CONST_INT
1686 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1687 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1688 return op0;
1689 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
1690 && GET_MODE_CLASS (mode) != MODE_CC)
1691 return op0;
1692 /* A & (~A) -> 0 */
1693 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1694 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1695 && ! side_effects_p (op0)
1696 && GET_MODE_CLASS (mode) != MODE_CC)
1697 return CONST0_RTX (mode);
1699 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1700 there are no nonzero bits of C outside of X's mode. */
1701 if ((GET_CODE (op0) == SIGN_EXTEND
1702 || GET_CODE (op0) == ZERO_EXTEND)
1703 && GET_CODE (trueop1) == CONST_INT
1704 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1705 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1706 & INTVAL (trueop1)) == 0)
1708 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1709 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1710 gen_int_mode (INTVAL (trueop1),
1711 imode));
1712 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1715 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1716 ((A & N) + B) & M -> (A + B) & M
1717 Similarly if (N & M) == 0,
1718 ((A | N) + B) & M -> (A + B) & M
1719 and for - instead of + and/or ^ instead of |. */
1720 if (GET_CODE (trueop1) == CONST_INT
1721 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1722 && ~INTVAL (trueop1)
1723 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1724 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1726 rtx pmop[2];
1727 int which;
1729 pmop[0] = XEXP (op0, 0);
1730 pmop[1] = XEXP (op0, 1);
1732 for (which = 0; which < 2; which++)
1734 tem = pmop[which];
1735 switch (GET_CODE (tem))
1737 case AND:
1738 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1739 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1740 == INTVAL (trueop1))
1741 pmop[which] = XEXP (tem, 0);
1742 break;
1743 case IOR:
1744 case XOR:
1745 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1746 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1747 pmop[which] = XEXP (tem, 0);
1748 break;
1749 default:
1750 break;
1754 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1756 tem = simplify_gen_binary (GET_CODE (op0), mode,
1757 pmop[0], pmop[1]);
1758 return simplify_gen_binary (code, mode, tem, op1);
1761 tem = simplify_associative_operation (code, mode, op0, op1);
1762 if (tem)
1763 return tem;
1764 break;
1766 case UDIV:
1767 /* 0/x is 0 (or x&0 if x has side-effects). */
1768 if (trueop0 == CONST0_RTX (mode))
1770 if (side_effects_p (op1))
1771 return simplify_gen_binary (AND, mode, op1, trueop0);
1772 return trueop0;
1774 /* x/1 is x. */
1775 if (trueop1 == CONST1_RTX (mode))
1776 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1777 /* Convert divide by power of two into shift. */
1778 if (GET_CODE (trueop1) == CONST_INT
1779 && (val = exact_log2 (INTVAL (trueop1))) > 0)
1780 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
1781 break;
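/* Illustrative example (editorial): with trueop1 = (const_int 8),
   exact_log2 returns 3, so the UDIV above becomes
   (lshiftrt:SI op0 (const_int 3)).  */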
1783 case DIV:
1784 /* Handle floating point and integers separately. */
1785 if (SCALAR_FLOAT_MODE_P (mode))
1787 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1788 safe for modes with NaNs, since 0.0 / 0.0 will then be
1789 NaN rather than 0.0. Nor is it safe for modes with signed
1790 zeros, since dividing 0 by a negative number gives -0.0 */
1791 if (trueop0 == CONST0_RTX (mode)
1792 && !HONOR_NANS (mode)
1793 && !HONOR_SIGNED_ZEROS (mode)
1794 && ! side_effects_p (op1))
1795 return op0;
1796 /* x/1.0 is x. */
1797 if (trueop1 == CONST1_RTX (mode)
1798 && !HONOR_SNANS (mode))
1799 return op0;
1801 if (GET_CODE (trueop1) == CONST_DOUBLE
1802 && trueop1 != CONST0_RTX (mode))
1804 REAL_VALUE_TYPE d;
1805 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1807 /* x/-1.0 is -x. */
1808 if (REAL_VALUES_EQUAL (d, dconstm1)
1809 && !HONOR_SNANS (mode))
1810 return simplify_gen_unary (NEG, mode, op0, mode);
1812 /* Change FP division by a constant into multiplication.
1813 Only do this with -funsafe-math-optimizations. */
1814 if (flag_unsafe_math_optimizations
1815 && !REAL_VALUES_EQUAL (d, dconst0))
1817 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1818 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1819 return simplify_gen_binary (MULT, mode, op0, tem);
1823 else
1825 /* 0/x is 0 (or x&0 if x has side-effects). */
1826 if (trueop0 == CONST0_RTX (mode))
1828 if (side_effects_p (op1))
1829 return simplify_gen_binary (AND, mode, op1, trueop0);
1830 return trueop0;
1832 /* x/1 is x. */
1833 if (trueop1 == CONST1_RTX (mode))
1834 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1835 /* x/-1 is -x. */
1836 if (trueop1 == constm1_rtx)
1838 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
1839 return simplify_gen_unary (NEG, mode, x, mode);
1842 break;
1844 case UMOD:
1845 /* 0%x is 0 (or x&0 if x has side-effects). */
1846 if (trueop0 == CONST0_RTX (mode))
1848 if (side_effects_p (op1))
1849 return simplify_gen_binary (AND, mode, op1, trueop0);
1850 return trueop0;
1852 /* x%1 is 0 (or x&0 if x has side-effects). */
1853 if (trueop1 == CONST1_RTX (mode))
1855 if (side_effects_p (op0))
1856 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1857 return CONST0_RTX (mode);
1859 /* Implement modulus by power of two as AND. */
1860 if (GET_CODE (trueop1) == CONST_INT
1861 && exact_log2 (INTVAL (trueop1)) > 0)
1862 return simplify_gen_binary (AND, mode, op0,
1863 GEN_INT (INTVAL (op1) - 1));
1864 break;
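/* Illustrative example (editorial): (umod:SI op0 (const_int 8)) is a
   modulus by a power of two, so the case above rewrites it as
   (and:SI op0 (const_int 7)).  */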
1866 case MOD:
1867 /* 0%x is 0 (or x&0 if x has side-effects). */
1868 if (trueop0 == CONST0_RTX (mode))
1870 if (side_effects_p (op1))
1871 return simplify_gen_binary (AND, mode, op1, trueop0);
1872 return trueop0;
1874 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
1875 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
1877 if (side_effects_p (op0))
1878 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1879 return CONST0_RTX (mode);
1881 break;
1883 case ROTATERT:
1884 case ROTATE:
1885 case ASHIFTRT:
1886 /* Rotating ~0 always results in ~0. */
1887 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1888 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1889 && ! side_effects_p (op1))
1890 return op0;
1892 /* Fall through.... */
1894 case ASHIFT:
1895 case LSHIFTRT:
1896 if (trueop1 == CONST0_RTX (mode))
1897 return op0;
1898 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
1899 return op0;
1900 break;
1902 case SMIN:
1903 if (width <= HOST_BITS_PER_WIDE_INT
1904 && GET_CODE (trueop1) == CONST_INT
1905 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1906 && ! side_effects_p (op0))
1907 return op1;
1908 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1909 return op0;
1910 tem = simplify_associative_operation (code, mode, op0, op1);
1911 if (tem)
1912 return tem;
1913 break;
1915 case SMAX:
1916 if (width <= HOST_BITS_PER_WIDE_INT
1917 && GET_CODE (trueop1) == CONST_INT
1918 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1919 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1920 && ! side_effects_p (op0))
1921 return op1;
1922 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1923 return op0;
1924 tem = simplify_associative_operation (code, mode, op0, op1);
1925 if (tem)
1926 return tem;
1927 break;
1929 case UMIN:
1930 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1931 return op1;
1932 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1933 return op0;
1934 tem = simplify_associative_operation (code, mode, op0, op1);
1935 if (tem)
1936 return tem;
1937 break;
1939 case UMAX:
1940 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1941 return op1;
1942 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1943 return op0;
1944 tem = simplify_associative_operation (code, mode, op0, op1);
1945 if (tem)
1946 return tem;
1947 break;
1949 case SS_PLUS:
1950 case US_PLUS:
1951 case SS_MINUS:
1952 case US_MINUS:
1953 /* ??? There are simplifications that can be done. */
1954 return 0;
1956 case VEC_SELECT:
1957 if (!VECTOR_MODE_P (mode))
1959 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1960 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1961 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1962 gcc_assert (XVECLEN (trueop1, 0) == 1);
1963 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1965 if (GET_CODE (trueop0) == CONST_VECTOR)
1966 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1967 (trueop1, 0, 0)));
1969 else
1971 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1972 gcc_assert (GET_MODE_INNER (mode)
1973 == GET_MODE_INNER (GET_MODE (trueop0)));
1974 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1976 if (GET_CODE (trueop0) == CONST_VECTOR)
1978 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1979 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1980 rtvec v = rtvec_alloc (n_elts);
1981 unsigned int i;
1983 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1984 for (i = 0; i < n_elts; i++)
1986 rtx x = XVECEXP (trueop1, 0, i);
1988 gcc_assert (GET_CODE (x) == CONST_INT);
1989 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
1990 INTVAL (x));
1993 return gen_rtx_CONST_VECTOR (mode, v);
1996 return 0;
1997 case VEC_CONCAT:
1999 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2000 ? GET_MODE (trueop0)
2001 : GET_MODE_INNER (mode));
2002 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2003 ? GET_MODE (trueop1)
2004 : GET_MODE_INNER (mode));
2006 gcc_assert (VECTOR_MODE_P (mode));
2007 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2008 == GET_MODE_SIZE (mode));
2010 if (VECTOR_MODE_P (op0_mode))
2011 gcc_assert (GET_MODE_INNER (mode)
2012 == GET_MODE_INNER (op0_mode));
2013 else
2014 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2016 if (VECTOR_MODE_P (op1_mode))
2017 gcc_assert (GET_MODE_INNER (mode)
2018 == GET_MODE_INNER (op1_mode));
2019 else
2020 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2022 if ((GET_CODE (trueop0) == CONST_VECTOR
2023 || GET_CODE (trueop0) == CONST_INT
2024 || GET_CODE (trueop0) == CONST_DOUBLE)
2025 && (GET_CODE (trueop1) == CONST_VECTOR
2026 || GET_CODE (trueop1) == CONST_INT
2027 || GET_CODE (trueop1) == CONST_DOUBLE))
2029 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2030 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2031 rtvec v = rtvec_alloc (n_elts);
2032 unsigned int i;
2033 unsigned in_n_elts = 1;
2035 if (VECTOR_MODE_P (op0_mode))
2036 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2037 for (i = 0; i < n_elts; i++)
2039 if (i < in_n_elts)
2041 if (!VECTOR_MODE_P (op0_mode))
2042 RTVEC_ELT (v, i) = trueop0;
2043 else
2044 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2046 else
2048 if (!VECTOR_MODE_P (op1_mode))
2049 RTVEC_ELT (v, i) = trueop1;
2050 else
2051 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2052 i - in_n_elts);
2056 return gen_rtx_CONST_VECTOR (mode, v);
2059 return 0;
2061 default:
2062 gcc_unreachable ();
2065 return 0;
2069 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2070 rtx op0, rtx op1)
2072 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2073 HOST_WIDE_INT val;
2074 unsigned int width = GET_MODE_BITSIZE (mode);
2076 if (VECTOR_MODE_P (mode)
2077 && code != VEC_CONCAT
2078 && GET_CODE (op0) == CONST_VECTOR
2079 && GET_CODE (op1) == CONST_VECTOR)
2081 unsigned n_elts = GET_MODE_NUNITS (mode);
2082 enum machine_mode op0mode = GET_MODE (op0);
2083 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2084 enum machine_mode op1mode = GET_MODE (op1);
2085 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2086 rtvec v = rtvec_alloc (n_elts);
2087 unsigned int i;
2089 gcc_assert (op0_n_elts == n_elts);
2090 gcc_assert (op1_n_elts == n_elts);
2091 for (i = 0; i < n_elts; i++)
2093 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2094 CONST_VECTOR_ELT (op0, i),
2095 CONST_VECTOR_ELT (op1, i));
2096 if (!x)
2097 return 0;
2098 RTVEC_ELT (v, i) = x;
2101 return gen_rtx_CONST_VECTOR (mode, v);
2104 if (VECTOR_MODE_P (mode)
2105 && code == VEC_CONCAT
2106 && CONSTANT_P (op0) && CONSTANT_P (op1))
2108 unsigned n_elts = GET_MODE_NUNITS (mode);
2109 rtvec v = rtvec_alloc (n_elts);
2111 gcc_assert (n_elts >= 2);
2112 if (n_elts == 2)
2114 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2115 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2117 RTVEC_ELT (v, 0) = op0;
2118 RTVEC_ELT (v, 1) = op1;
2120 else
2122 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2123 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2124 unsigned i;
2126 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2127 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2128 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2130 for (i = 0; i < op0_n_elts; ++i)
2131 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2132 for (i = 0; i < op1_n_elts; ++i)
2133 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2136 return gen_rtx_CONST_VECTOR (mode, v);
2139 if (SCALAR_FLOAT_MODE_P (mode)
2140 && GET_CODE (op0) == CONST_DOUBLE
2141 && GET_CODE (op1) == CONST_DOUBLE
2142 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2144 if (code == AND
2145 || code == IOR
2146 || code == XOR)
2148 long tmp0[4];
2149 long tmp1[4];
2150 REAL_VALUE_TYPE r;
2151 int i;
2153 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2154 GET_MODE (op0));
2155 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2156 GET_MODE (op1));
2157 for (i = 0; i < 4; i++)
2159 switch (code)
2161 case AND:
2162 tmp0[i] &= tmp1[i];
2163 break;
2164 case IOR:
2165 tmp0[i] |= tmp1[i];
2166 break;
2167 case XOR:
2168 tmp0[i] ^= tmp1[i];
2169 break;
2170 default:
2171 gcc_unreachable ();
2174 real_from_target (&r, tmp0, mode);
2175 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2177 else
2179 REAL_VALUE_TYPE f0, f1, value, result;
2180 bool inexact;
2182 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2183 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2184 real_convert (&f0, mode, &f0);
2185 real_convert (&f1, mode, &f1);
2187 if (HONOR_SNANS (mode)
2188 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2189 return 0;
2191 if (code == DIV
2192 && REAL_VALUES_EQUAL (f1, dconst0)
2193 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2194 return 0;
2196 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2197 && flag_trapping_math
2198 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2200 int s0 = REAL_VALUE_NEGATIVE (f0);
2201 int s1 = REAL_VALUE_NEGATIVE (f1);
2203 switch (code)
2205 case PLUS:
2206 /* Inf + -Inf = NaN plus exception. */
2207 if (s0 != s1)
2208 return 0;
2209 break;
2210 case MINUS:
2211 /* Inf - Inf = NaN plus exception. */
2212 if (s0 == s1)
2213 return 0;
2214 break;
2215 case DIV:
2216 /* Inf / Inf = NaN plus exception. */
2217 return 0;
2218 default:
2219 break;
2223 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2224 && flag_trapping_math
2225 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2226 || (REAL_VALUE_ISINF (f1)
2227 && REAL_VALUES_EQUAL (f0, dconst0))))
2228 /* Inf * 0 = NaN plus exception. */
2229 return 0;
2231 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2232 &f0, &f1);
2233 real_convert (&result, mode, &value);
2235 /* Don't constant fold this floating point operation if
2236 the result has overflowed and flag_trapping_math is set. */
2238 if (flag_trapping_math
2239 && MODE_HAS_INFINITIES (mode)
2240 && REAL_VALUE_ISINF (result)
2241 && !REAL_VALUE_ISINF (f0)
2242 && !REAL_VALUE_ISINF (f1))
2243 /* Overflow plus exception. */
2244 return 0;
2246 /* Don't constant fold this floating point operation if the
2247 result may depend upon the run-time rounding mode and
2248 flag_rounding_math is set, or if GCC's software emulation
2249 is unable to accurately represent the result. */
2251 if ((flag_rounding_math
2252 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2253 && !flag_unsafe_math_optimizations))
2254 && (inexact || !real_identical (&result, &value)))
2255 return NULL_RTX;
2257 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2261 /* We can fold some multi-word operations. */
2262 if (GET_MODE_CLASS (mode) == MODE_INT
2263 && width == HOST_BITS_PER_WIDE_INT * 2
2264 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2265 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2267 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2268 HOST_WIDE_INT h1, h2, hv, ht;
2270 if (GET_CODE (op0) == CONST_DOUBLE)
2271 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2272 else
2273 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2275 if (GET_CODE (op1) == CONST_DOUBLE)
2276 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2277 else
2278 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2280 switch (code)
2282 case MINUS:
2283 /* A - B == A + (-B). */
2284 neg_double (l2, h2, &lv, &hv);
2285 l2 = lv, h2 = hv;
2287 /* Fall through.... */
2289 case PLUS:
2290 add_double (l1, h1, l2, h2, &lv, &hv);
2291 break;
2293 case MULT:
2294 mul_double (l1, h1, l2, h2, &lv, &hv);
2295 break;
2297 case DIV:
2298 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2299 &lv, &hv, &lt, &ht))
2300 return 0;
2301 break;
2303 case MOD:
2304 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2305 &lt, &ht, &lv, &hv))
2306 return 0;
2307 break;
2309 case UDIV:
2310 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2311 &lv, &hv, &lt, &ht))
2312 return 0;
2313 break;
2315 case UMOD:
2316 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2317 &lt, &ht, &lv, &hv))
2318 return 0;
2319 break;
2321 case AND:
2322 lv = l1 & l2, hv = h1 & h2;
2323 break;
2325 case IOR:
2326 lv = l1 | l2, hv = h1 | h2;
2327 break;
2329 case XOR:
2330 lv = l1 ^ l2, hv = h1 ^ h2;
2331 break;
2333 case SMIN:
2334 if (h1 < h2
2335 || (h1 == h2
2336 && ((unsigned HOST_WIDE_INT) l1
2337 < (unsigned HOST_WIDE_INT) l2)))
2338 lv = l1, hv = h1;
2339 else
2340 lv = l2, hv = h2;
2341 break;
2343 case SMAX:
2344 if (h1 > h2
2345 || (h1 == h2
2346 && ((unsigned HOST_WIDE_INT) l1
2347 > (unsigned HOST_WIDE_INT) l2)))
2348 lv = l1, hv = h1;
2349 else
2350 lv = l2, hv = h2;
2351 break;
2353 case UMIN:
2354 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2355 || (h1 == h2
2356 && ((unsigned HOST_WIDE_INT) l1
2357 < (unsigned HOST_WIDE_INT) l2)))
2358 lv = l1, hv = h1;
2359 else
2360 lv = l2, hv = h2;
2361 break;
2363 case UMAX:
2364 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2365 || (h1 == h2
2366 && ((unsigned HOST_WIDE_INT) l1
2367 > (unsigned HOST_WIDE_INT) l2)))
2368 lv = l1, hv = h1;
2369 else
2370 lv = l2, hv = h2;
2371 break;
2373 case LSHIFTRT: case ASHIFTRT:
2374 case ASHIFT:
2375 case ROTATE: case ROTATERT:
2376 if (SHIFT_COUNT_TRUNCATED)
2377 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2379 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2380 return 0;
2382 if (code == LSHIFTRT || code == ASHIFTRT)
2383 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2384 code == ASHIFTRT);
2385 else if (code == ASHIFT)
2386 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2387 else if (code == ROTATE)
2388 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2389 else /* code == ROTATERT */
2390 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2391 break;
2393 default:
2394 return 0;
2397 return immed_double_const (lv, hv, mode);
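/* Illustrative sketch of the (low, high) representation used above, with a
   hypothetical helper name and plain C types: a single-word signed constant
   is widened to a double-word pair by copying it into the low word and
   filling the high word with its sign.  */
static void
widen_to_double_word_sketch (long long value,
                             unsigned long long *low, long long *high)
{
  *low = (unsigned long long) value;
  *high = value < 0 ? -1 : 0;  /* Sign-extend the low word into the high word.  */
}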
2400 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2401 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2403 /* Get the integer argument values in two forms:
2404 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2406 arg0 = INTVAL (op0);
2407 arg1 = INTVAL (op1);
2409 if (width < HOST_BITS_PER_WIDE_INT)
2411 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2412 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2414 arg0s = arg0;
2415 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2416 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2418 arg1s = arg1;
2419 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2420 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2422 else
2424 arg0s = arg0;
2425 arg1s = arg1;
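/* Illustrative sketch of the ARG0/ARG0S pairing above, with a hypothetical
   helper name and plain C types: a WIDTH-bit constant (WIDTH smaller than
   the word size) is kept both zero-extended and sign-extended.  */
static void
extend_both_ways_sketch (unsigned long long raw, int width,
                         unsigned long long *zext, long long *sext)
{
  unsigned long long mask = ((unsigned long long) 1 << width) - 1;
  *zext = raw & mask;                       /* Drop any bits above WIDTH.  */
  if (*zext & ((unsigned long long) 1 << (width - 1)))
    *sext = (long long) (*zext | ~mask);    /* Negative: set the high bits.  */
  else
    *sext = (long long) *zext;
}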
2428 /* Compute the value of the arithmetic. */
2430 switch (code)
2432 case PLUS:
2433 val = arg0s + arg1s;
2434 break;
2436 case MINUS:
2437 val = arg0s - arg1s;
2438 break;
2440 case MULT:
2441 val = arg0s * arg1s;
2442 break;
2444 case DIV:
2445 if (arg1s == 0
2446 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2447 && arg1s == -1))
2448 return 0;
2449 val = arg0s / arg1s;
2450 break;
2452 case MOD:
2453 if (arg1s == 0
2454 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2455 && arg1s == -1))
2456 return 0;
2457 val = arg0s % arg1s;
2458 break;
2460 case UDIV:
2461 if (arg1 == 0
2462 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2463 && arg1s == -1))
2464 return 0;
2465 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2466 break;
2468 case UMOD:
2469 if (arg1 == 0
2470 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2471 && arg1s == -1))
2472 return 0;
2473 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2474 break;
2476 case AND:
2477 val = arg0 & arg1;
2478 break;
2480 case IOR:
2481 val = arg0 | arg1;
2482 break;
2484 case XOR:
2485 val = arg0 ^ arg1;
2486 break;
2488 case LSHIFTRT:
2489 case ASHIFT:
2490 case ASHIFTRT:
2491 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2492 the value is in range. We can't return any old value for
2493 out-of-range arguments because either the middle-end (via
2494 shift_truncation_mask) or the back-end might be relying on
2495 target-specific knowledge. Nor can we rely on
2496 shift_truncation_mask, since the shift might not be part of an
2497 ashlM3, lshrM3 or ashrM3 instruction. */
2498 if (SHIFT_COUNT_TRUNCATED)
2499 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2500 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2501 return 0;
2503 val = (code == ASHIFT
2504 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2505 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2507 /* Sign-extend the result for arithmetic right shifts. */
2508 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2509 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2510 break;
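/* Illustrative sketch of the arithmetic-shift handling above, with a
   hypothetical helper name and plain C types: the value is shifted as an
   unsigned quantity and, if the original WIDTH-bit value was negative, the
   vacated high bits are filled with ones (bits above WIDTH are left for the
   caller to truncate).  Assumes 0 < SHIFT < WIDTH.  */
static unsigned long long
ashiftrt_sketch (unsigned long long arg, int arg_was_negative,
                 int shift, int width)
{
  unsigned long long val = arg >> shift;
  if (arg_was_negative)
    val |= ~(unsigned long long) 0 << (width - shift);  /* Replicate the sign.  */
  return val;
}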
2512 case ROTATERT:
2513 if (arg1 < 0)
2514 return 0;
2516 arg1 %= width;
2517 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2518 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2519 break;
2521 case ROTATE:
2522 if (arg1 < 0)
2523 return 0;
2525 arg1 %= width;
2526 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2527 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2528 break;
2530 case COMPARE:
2531 /* Do nothing here. */
2532 return 0;
2534 case SMIN:
2535 val = arg0s <= arg1s ? arg0s : arg1s;
2536 break;
2538 case UMIN:
2539 val = ((unsigned HOST_WIDE_INT) arg0
2540 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2541 break;
2543 case SMAX:
2544 val = arg0s > arg1s ? arg0s : arg1s;
2545 break;
2547 case UMAX:
2548 val = ((unsigned HOST_WIDE_INT) arg0
2549 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2550 break;
2552 case SS_PLUS:
2553 case US_PLUS:
2554 case SS_MINUS:
2555 case US_MINUS:
2556 /* ??? There are simplifications that can be done. */
2557 return 0;
2559 default:
2560 gcc_unreachable ();
2563 return gen_int_mode (val, mode);
2566 return NULL_RTX;
2571 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2572 PLUS or MINUS.
2574 Rather than test for specific cases, we do this by a brute-force method
2575 and do all possible simplifications until no more changes occur. Then
2576 we rebuild the operation. */
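/* Illustrative worked example (hypothetical operands): simplifying
   (minus a (plus b 3)) first flattens the expression into the ops array as
   { a, + }, { b, - }, { 3, - }; the pairwise loop then combines whatever it
   can, and the surviving entries are rebuilt into a chain of PLUS/MINUS
   operations.  */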
2578 struct simplify_plus_minus_op_data
2580 rtx op;
2581 short neg;
2582 short ix;
2585 static int
2586 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2588 const struct simplify_plus_minus_op_data *d1 = p1;
2589 const struct simplify_plus_minus_op_data *d2 = p2;
2590 int result;
2592 result = (commutative_operand_precedence (d2->op)
2593 - commutative_operand_precedence (d1->op));
2594 if (result)
2595 return result;
2596 return d1->ix - d2->ix;
2599 static rtx
2600 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2601 rtx op1)
2603 struct simplify_plus_minus_op_data ops[8];
2604 rtx result, tem;
2605 int n_ops = 2, input_ops = 2;
2606 int first, changed, canonicalized = 0;
2607 int i, j;
2609 memset (ops, 0, sizeof ops);
2611 /* Set up the two operands and then expand them until nothing has been
2612 changed. If we run out of room in our array, give up; this should
2613 almost never happen. */
2615 ops[0].op = op0;
2616 ops[0].neg = 0;
2617 ops[1].op = op1;
2618 ops[1].neg = (code == MINUS);
2622 changed = 0;
2624 for (i = 0; i < n_ops; i++)
2626 rtx this_op = ops[i].op;
2627 int this_neg = ops[i].neg;
2628 enum rtx_code this_code = GET_CODE (this_op);
2630 switch (this_code)
2632 case PLUS:
2633 case MINUS:
2634 if (n_ops == 7)
2635 return NULL_RTX;
2637 ops[n_ops].op = XEXP (this_op, 1);
2638 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2639 n_ops++;
2641 ops[i].op = XEXP (this_op, 0);
2642 input_ops++;
2643 changed = 1;
2644 canonicalized |= this_neg;
2645 break;
2647 case NEG:
2648 ops[i].op = XEXP (this_op, 0);
2649 ops[i].neg = ! this_neg;
2650 changed = 1;
2651 canonicalized = 1;
2652 break;
2654 case CONST:
2655 if (n_ops < 7
2656 && GET_CODE (XEXP (this_op, 0)) == PLUS
2657 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2658 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2660 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2661 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2662 ops[n_ops].neg = this_neg;
2663 n_ops++;
2664 changed = 1;
2665 canonicalized = 1;
2667 break;
2669 case NOT:
2670 /* ~a -> (-a - 1) */
2671 if (n_ops != 7)
2673 ops[n_ops].op = constm1_rtx;
2674 ops[n_ops++].neg = this_neg;
2675 ops[i].op = XEXP (this_op, 0);
2676 ops[i].neg = !this_neg;
2677 changed = 1;
2678 canonicalized = 1;
2680 break;
2682 case CONST_INT:
2683 if (this_neg)
2685 ops[i].op = neg_const_int (mode, this_op);
2686 ops[i].neg = 0;
2687 changed = 1;
2688 canonicalized = 1;
2690 break;
2692 default:
2693 break;
2697 while (changed);
2699 gcc_assert (n_ops >= 2);
2700 if (!canonicalized)
2702 int n_constants = 0;
2704 for (i = 0; i < n_ops; i++)
2705 if (GET_CODE (ops[i].op) == CONST_INT)
2706 n_constants++;
2708 if (n_constants <= 1)
2709 return NULL_RTX;
2712 /* If we only have two operands, we can avoid the loops. */
2713 if (n_ops == 2)
2715 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
2716 rtx lhs, rhs;
2718 /* Get the two operands. Be careful with the order, especially for
2719 the cases where code == MINUS. */
2720 if (ops[0].neg && ops[1].neg)
2722 lhs = gen_rtx_NEG (mode, ops[0].op);
2723 rhs = ops[1].op;
2725 else if (ops[0].neg)
2727 lhs = ops[1].op;
2728 rhs = ops[0].op;
2730 else
2732 lhs = ops[0].op;
2733 rhs = ops[1].op;
2736 return simplify_const_binary_operation (code, mode, lhs, rhs);
2739 /* Now simplify each pair of operands until nothing changes. The first
2740 time through just simplify constants against each other. */
2742 first = 1;
2745 changed = first;
2747 for (i = 0; i < n_ops - 1; i++)
2748 for (j = i + 1; j < n_ops; j++)
2750 rtx lhs = ops[i].op, rhs = ops[j].op;
2751 int lneg = ops[i].neg, rneg = ops[j].neg;
2753 if (lhs != 0 && rhs != 0
2754 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2756 enum rtx_code ncode = PLUS;
2758 if (lneg != rneg)
2760 ncode = MINUS;
2761 if (lneg)
2762 tem = lhs, lhs = rhs, rhs = tem;
2764 else if (swap_commutative_operands_p (lhs, rhs))
2765 tem = lhs, lhs = rhs, rhs = tem;
2767 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2769 /* Reject "simplifications" that just wrap the two
2770 arguments in a CONST. Failure to do so can result
2771 in infinite recursion with simplify_binary_operation
2772 when it calls us to simplify CONST operations. */
2773 if (tem
2774 && ! (GET_CODE (tem) == CONST
2775 && GET_CODE (XEXP (tem, 0)) == ncode
2776 && XEXP (XEXP (tem, 0), 0) == lhs
2777 && XEXP (XEXP (tem, 0), 1) == rhs)
2778 /* Don't allow -x + -1 -> ~x simplifications in the
2779 first pass. This allows us the chance to combine
2780 the -1 with other constants. */
2781 && ! (first
2782 && GET_CODE (tem) == NOT
2783 && XEXP (tem, 0) == rhs))
2785 lneg &= rneg;
2786 if (GET_CODE (tem) == NEG)
2787 tem = XEXP (tem, 0), lneg = !lneg;
2788 if (GET_CODE (tem) == CONST_INT && lneg)
2789 tem = neg_const_int (mode, tem), lneg = 0;
2791 ops[i].op = tem;
2792 ops[i].neg = lneg;
2793 ops[j].op = NULL_RTX;
2794 changed = 1;
2799 first = 0;
2801 while (changed);
2803 /* Pack all the operands to the lower-numbered entries. */
2804 for (i = 0, j = 0; j < n_ops; j++)
2805 if (ops[j].op)
2807 ops[i] = ops[j];
2808 /* Stabilize sort. */
2809 ops[i].ix = i;
2810 i++;
2812 n_ops = i;
2814 /* Sort the operations based on swap_commutative_operands_p. */
2815 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2817 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2818 if (n_ops == 2
2819 && GET_CODE (ops[1].op) == CONST_INT
2820 && CONSTANT_P (ops[0].op)
2821 && ops[0].neg)
2822 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2824 /* We suppressed creation of trivial CONST expressions in the
2825 combination loop to avoid recursion. Create one manually now.
2826 The combination loop should have ensured that there is exactly
2827 one CONST_INT, and the sort will have ensured that it is last
2828 in the array and that any other constant will be next-to-last. */
2830 if (n_ops > 1
2831 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2832 && CONSTANT_P (ops[n_ops - 2].op))
2834 rtx value = ops[n_ops - 1].op;
2835 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2836 value = neg_const_int (mode, value);
2837 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2838 n_ops--;
2841 /* Put a non-negated operand first, if possible. */
2843 for (i = 0; i < n_ops && ops[i].neg; i++)
2844 continue;
2845 if (i == n_ops)
2846 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2847 else if (i != 0)
2849 tem = ops[0].op;
2850 ops[0] = ops[i];
2851 ops[i].op = tem;
2852 ops[i].neg = 1;
2855 /* Now make the result by performing the requested operations. */
2856 result = ops[0].op;
2857 for (i = 1; i < n_ops; i++)
2858 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2859 mode, result, ops[i].op);
2861 return result;
2864 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2865 static bool
2866 plus_minus_operand_p (rtx x)
2868 return GET_CODE (x) == PLUS
2869 || GET_CODE (x) == MINUS
2870 || (GET_CODE (x) == CONST
2871 && GET_CODE (XEXP (x, 0)) == PLUS
2872 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2873 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2876 /* Like simplify_binary_operation except used for relational operators.
2877 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2878 not both be VOIDmode as well.
2880 CMP_MODE specifies the mode in which the comparison is done, so it is
2881 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2882 the operands or, if both are VOIDmode, the operands are compared in
2883 "infinite precision". */
2885 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2886 enum machine_mode cmp_mode, rtx op0, rtx op1)
2888 rtx tem, trueop0, trueop1;
2890 if (cmp_mode == VOIDmode)
2891 cmp_mode = GET_MODE (op0);
2892 if (cmp_mode == VOIDmode)
2893 cmp_mode = GET_MODE (op1);
2895 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2896 if (tem)
2898 if (SCALAR_FLOAT_MODE_P (mode))
2900 if (tem == const0_rtx)
2901 return CONST0_RTX (mode);
2902 #ifdef FLOAT_STORE_FLAG_VALUE
2904 REAL_VALUE_TYPE val;
2905 val = FLOAT_STORE_FLAG_VALUE (mode);
2906 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2908 #else
2909 return NULL_RTX;
2910 #endif
2912 if (VECTOR_MODE_P (mode))
2914 if (tem == const0_rtx)
2915 return CONST0_RTX (mode);
2916 #ifdef VECTOR_STORE_FLAG_VALUE
2918 int i, units;
2919 rtvec v;
2921 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2922 if (val == NULL_RTX)
2923 return NULL_RTX;
2924 if (val == const1_rtx)
2925 return CONST1_RTX (mode);
2927 units = GET_MODE_NUNITS (mode);
2928 v = rtvec_alloc (units);
2929 for (i = 0; i < units; i++)
2930 RTVEC_ELT (v, i) = val;
2931 return gen_rtx_raw_CONST_VECTOR (mode, v);
2933 #else
2934 return NULL_RTX;
2935 #endif
2938 return tem;
2941 /* For the following tests, ensure const0_rtx is op1. */
2942 if (swap_commutative_operands_p (op0, op1)
2943 || (op0 == const0_rtx && op1 != const0_rtx))
2944 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2946 /* If op0 is a compare, extract the comparison arguments from it. */
2947 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2948 return simplify_relational_operation (code, mode, VOIDmode,
2949 XEXP (op0, 0), XEXP (op0, 1));
2951 if (mode == VOIDmode
2952 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2953 || CC0_P (op0))
2954 return NULL_RTX;
2956 trueop0 = avoid_constant_pool_reference (op0);
2957 trueop1 = avoid_constant_pool_reference (op1);
2958 return simplify_relational_operation_1 (code, mode, cmp_mode,
2959 trueop0, trueop1);
2962 /* This part of simplify_relational_operation is only used when CMP_MODE
2963 is not in class MODE_CC (i.e. it is a real comparison).
2965 MODE is the mode of the result, while CMP_MODE specifies the mode
2966 in which the comparison is done, so it is the mode of the operands. */
2968 static rtx
2969 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2970 enum machine_mode cmp_mode, rtx op0, rtx op1)
2972 enum rtx_code op0code = GET_CODE (op0);
2974 if (GET_CODE (op1) == CONST_INT)
2976 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2978 /* If op0 is a comparison, extract the comparison arguments from it. */
2979 if (code == NE)
2981 if (GET_MODE (op0) == mode)
2982 return simplify_rtx (op0);
2983 else
2984 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2985 XEXP (op0, 0), XEXP (op0, 1));
2987 else if (code == EQ)
2989 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2990 if (new_code != UNKNOWN)
2991 return simplify_gen_relational (new_code, mode, VOIDmode,
2992 XEXP (op0, 0), XEXP (op0, 1));
2997 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2998 if ((code == EQ || code == NE)
2999 && (op0code == PLUS || op0code == MINUS)
3000 && CONSTANT_P (op1)
3001 && CONSTANT_P (XEXP (op0, 1))
3002 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3004 rtx x = XEXP (op0, 0);
3005 rtx c = XEXP (op0, 1);
3007 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3008 cmp_mode, op1, c);
3009 return simplify_gen_relational (code, mode, cmp_mode, x, c);
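/* Illustrative sketch of the identity used above, with a hypothetical helper
   name and plain C (wrapping, unsigned) arithmetic: (x + c1) == c2 holds
   exactly when x == (c2 - c1), so this function returns 1 for every input.  */
static int
eq_plus_const_sketch (unsigned long long x, unsigned long long c1,
                      unsigned long long c2)
{
  return (x + c1 == c2) == (x == c2 - c1);
}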
3012 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3013 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3014 if (code == NE
3015 && op1 == const0_rtx
3016 && GET_MODE_CLASS (mode) == MODE_INT
3017 && cmp_mode != VOIDmode
3018 /* ??? Work-around BImode bugs in the ia64 backend. */
3019 && mode != BImode
3020 && cmp_mode != BImode
3021 && nonzero_bits (op0, cmp_mode) == 1
3022 && STORE_FLAG_VALUE == 1)
3023 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3024 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3025 : lowpart_subreg (mode, op0, cmp_mode);
3027 return NULL_RTX;
3030 /* Check if the given comparison (done in the given MODE) is actually a
3031 tautology or a contradiction.
3032 If no simplification is possible, this function returns zero.
3033 Otherwise, it returns either const_true_rtx or const0_rtx. */
3036 simplify_const_relational_operation (enum rtx_code code,
3037 enum machine_mode mode,
3038 rtx op0, rtx op1)
3040 int equal, op0lt, op0ltu, op1lt, op1ltu;
3041 rtx tem;
3042 rtx trueop0;
3043 rtx trueop1;
3045 gcc_assert (mode != VOIDmode
3046 || (GET_MODE (op0) == VOIDmode
3047 && GET_MODE (op1) == VOIDmode));
3049 /* If op0 is a compare, extract the comparison arguments from it. */
3050 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3052 op1 = XEXP (op0, 1);
3053 op0 = XEXP (op0, 0);
3055 if (GET_MODE (op0) != VOIDmode)
3056 mode = GET_MODE (op0);
3057 else if (GET_MODE (op1) != VOIDmode)
3058 mode = GET_MODE (op1);
3059 else
3060 return 0;
3063 /* We can't simplify MODE_CC values since we don't know what the
3064 actual comparison is. */
3065 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3066 return 0;
3068 /* Make sure the constant is second. */
3069 if (swap_commutative_operands_p (op0, op1))
3071 tem = op0, op0 = op1, op1 = tem;
3072 code = swap_condition (code);
3075 trueop0 = avoid_constant_pool_reference (op0);
3076 trueop1 = avoid_constant_pool_reference (op1);
3078 /* For integer comparisons of A and B maybe we can simplify A - B and can
3079 then simplify a comparison of that with zero. If A and B are both either
3080 a register or a CONST_INT, this can't help; testing for these cases will
3081 prevent infinite recursion here and speed things up.
3083 If CODE is an unsigned comparison, then we can never do this optimization,
3084 because it gives an incorrect result if the subtraction wraps around zero.
3085 ANSI C defines unsigned operations such that they never overflow, and
3086 thus such cases can not be ignored; but we cannot do it even for
3087 signed comparisons for languages such as Java, so test flag_wrapv. */
3089 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3090 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3091 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3092 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3093 /* We cannot do this for == or != if tem is a nonzero address. */
3094 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3095 && code != GTU && code != GEU && code != LTU && code != LEU)
3096 return simplify_const_relational_operation (signed_condition (code),
3097 mode, tem, const0_rtx);
3099 if (flag_unsafe_math_optimizations && code == ORDERED)
3100 return const_true_rtx;
3102 if (flag_unsafe_math_optimizations && code == UNORDERED)
3103 return const0_rtx;
3105 /* For modes without NaNs, if the two operands are equal, we know the
3106 result except if they have side-effects. */
3107 if (! HONOR_NANS (GET_MODE (trueop0))
3108 && rtx_equal_p (trueop0, trueop1)
3109 && ! side_effects_p (trueop0))
3110 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3112 /* If the operands are floating-point constants, see if we can fold
3113 the result. */
3114 else if (GET_CODE (trueop0) == CONST_DOUBLE
3115 && GET_CODE (trueop1) == CONST_DOUBLE
3116 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3118 REAL_VALUE_TYPE d0, d1;
3120 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3121 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3123 /* Comparisons are unordered iff at least one of the values is NaN. */
3124 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3125 switch (code)
3127 case UNEQ:
3128 case UNLT:
3129 case UNGT:
3130 case UNLE:
3131 case UNGE:
3132 case NE:
3133 case UNORDERED:
3134 return const_true_rtx;
3135 case EQ:
3136 case LT:
3137 case GT:
3138 case LE:
3139 case GE:
3140 case LTGT:
3141 case ORDERED:
3142 return const0_rtx;
3143 default:
3144 return 0;
3147 equal = REAL_VALUES_EQUAL (d0, d1);
3148 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3149 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3152 /* Otherwise, see if the operands are both integers. */
3153 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3154 && (GET_CODE (trueop0) == CONST_DOUBLE
3155 || GET_CODE (trueop0) == CONST_INT)
3156 && (GET_CODE (trueop1) == CONST_DOUBLE
3157 || GET_CODE (trueop1) == CONST_INT))
3159 int width = GET_MODE_BITSIZE (mode);
3160 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3161 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3163 /* Get the two words comprising each integer constant. */
3164 if (GET_CODE (trueop0) == CONST_DOUBLE)
3166 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3167 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3169 else
3171 l0u = l0s = INTVAL (trueop0);
3172 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3175 if (GET_CODE (trueop1) == CONST_DOUBLE)
3177 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3178 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3180 else
3182 l1u = l1s = INTVAL (trueop1);
3183 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3186 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3187 we have to sign or zero-extend the values. */
3188 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3190 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3191 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3193 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3194 l0s |= ((HOST_WIDE_INT) (-1) << width);
3196 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3197 l1s |= ((HOST_WIDE_INT) (-1) << width);
3199 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3200 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3202 equal = (h0u == h1u && l0u == l1u);
3203 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3204 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3205 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3206 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
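/* Illustrative sketch of the double-word comparison above, with a
   hypothetical helper name and plain C types: a signed "less than" compares
   the high words as signed values and breaks ties with an unsigned
   comparison of the low words.  */
static int
double_word_signed_lt_sketch (long long h0, unsigned long long l0,
                              long long h1, unsigned long long l1)
{
  return h0 < h1 || (h0 == h1 && l0 < l1);
}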
3209 /* Otherwise, there are some code-specific tests we can make. */
3210 else
3212 /* Optimize comparisons with upper and lower bounds. */
3213 if (SCALAR_INT_MODE_P (mode)
3214 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3216 rtx mmin, mmax;
3217 int sign;
3219 if (code == GEU
3220 || code == LEU
3221 || code == GTU
3222 || code == LTU)
3223 sign = 0;
3224 else
3225 sign = 1;
3227 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3229 tem = NULL_RTX;
3230 switch (code)
3232 case GEU:
3233 case GE:
3234 /* x >= min is always true. */
3235 if (rtx_equal_p (trueop1, mmin))
3236 tem = const_true_rtx;
3237 else
3238 break;
3240 case LEU:
3241 case LE:
3242 /* x <= max is always true. */
3243 if (rtx_equal_p (trueop1, mmax))
3244 tem = const_true_rtx;
3245 break;
3247 case GTU:
3248 case GT:
3249 /* x > max is always false. */
3250 if (rtx_equal_p (trueop1, mmax))
3251 tem = const0_rtx;
3252 break;
3254 case LTU:
3255 case LT:
3256 /* x < min is always false. */
3257 if (rtx_equal_p (trueop1, mmin))
3258 tem = const0_rtx;
3259 break;
3261 default:
3262 break;
3264 if (tem == const0_rtx
3265 || tem == const_true_rtx)
3266 return tem;
3269 switch (code)
3271 case EQ:
3272 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3273 return const0_rtx;
3274 break;
3276 case NE:
3277 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3278 return const_true_rtx;
3279 break;
3281 case LT:
3282 /* Optimize abs(x) < 0.0. */
3283 if (trueop1 == CONST0_RTX (mode)
3284 && !HONOR_SNANS (mode)
3285 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3287 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3288 : trueop0;
3289 if (GET_CODE (tem) == ABS)
3290 return const0_rtx;
3292 break;
3294 case GE:
3295 /* Optimize abs(x) >= 0.0. */
3296 if (trueop1 == CONST0_RTX (mode)
3297 && !HONOR_NANS (mode)
3298 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3300 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3301 : trueop0;
3302 if (GET_CODE (tem) == ABS)
3303 return const_true_rtx;
3305 break;
3307 case UNGE:
3308 /* Optimize ! (abs(x) < 0.0). */
3309 if (trueop1 == CONST0_RTX (mode))
3311 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3312 : trueop0;
3313 if (GET_CODE (tem) == ABS)
3314 return const_true_rtx;
3316 break;
3318 default:
3319 break;
3322 return 0;
3325 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3326 as appropriate. */
3327 switch (code)
3329 case EQ:
3330 case UNEQ:
3331 return equal ? const_true_rtx : const0_rtx;
3332 case NE:
3333 case LTGT:
3334 return ! equal ? const_true_rtx : const0_rtx;
3335 case LT:
3336 case UNLT:
3337 return op0lt ? const_true_rtx : const0_rtx;
3338 case GT:
3339 case UNGT:
3340 return op1lt ? const_true_rtx : const0_rtx;
3341 case LTU:
3342 return op0ltu ? const_true_rtx : const0_rtx;
3343 case GTU:
3344 return op1ltu ? const_true_rtx : const0_rtx;
3345 case LE:
3346 case UNLE:
3347 return equal || op0lt ? const_true_rtx : const0_rtx;
3348 case GE:
3349 case UNGE:
3350 return equal || op1lt ? const_true_rtx : const0_rtx;
3351 case LEU:
3352 return equal || op0ltu ? const_true_rtx : const0_rtx;
3353 case GEU:
3354 return equal || op1ltu ? const_true_rtx : const0_rtx;
3355 case ORDERED:
3356 return const_true_rtx;
3357 case UNORDERED:
3358 return const0_rtx;
3359 default:
3360 gcc_unreachable ();
3364 /* Simplify CODE, an operation with result mode MODE and three operands,
3365 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3366 a constant. Return 0 if no simplification is possible. */
3369 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3370 enum machine_mode op0_mode, rtx op0, rtx op1,
3371 rtx op2)
3373 unsigned int width = GET_MODE_BITSIZE (mode);
3375 /* VOIDmode means "infinite" precision. */
3376 if (width == 0)
3377 width = HOST_BITS_PER_WIDE_INT;
3379 switch (code)
3381 case SIGN_EXTRACT:
3382 case ZERO_EXTRACT:
3383 if (GET_CODE (op0) == CONST_INT
3384 && GET_CODE (op1) == CONST_INT
3385 && GET_CODE (op2) == CONST_INT
3386 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3387 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3389 /* Extracting a bit-field from a constant. */
3390 HOST_WIDE_INT val = INTVAL (op0);
3392 if (BITS_BIG_ENDIAN)
3393 val >>= (GET_MODE_BITSIZE (op0_mode)
3394 - INTVAL (op2) - INTVAL (op1));
3395 else
3396 val >>= INTVAL (op2);
3398 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3400 /* First zero-extend. */
3401 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3402 /* If desired, propagate sign bit. */
3403 if (code == SIGN_EXTRACT
3404 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3405 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3408 /* Clear the bits that don't belong in our mode,
3409 unless they and our sign bit are all one.
3410 So we get either a reasonable negative value or a reasonable
3411 unsigned value for this mode. */
3412 if (width < HOST_BITS_PER_WIDE_INT
3413 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3414 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3415 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3417 return gen_int_mode (val, mode);
3419 break;
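/* Illustrative sketch of the extraction above, with a hypothetical helper
   name and plain C types: a LEN-bit field (LEN smaller than the word size)
   starting at bit POS is shifted down, masked, and, for a signed extract,
   has its top bit propagated upward.  */
static long long
extract_field_sketch (unsigned long long val, int pos, int len, int is_signed)
{
  unsigned long long mask = ((unsigned long long) 1 << len) - 1;
  unsigned long long field = (val >> pos) & mask;
  if (is_signed && (field & ((unsigned long long) 1 << (len - 1))))
    field |= ~mask;                    /* Propagate the field's sign bit.  */
  return (long long) field;
}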
3421 case IF_THEN_ELSE:
3422 if (GET_CODE (op0) == CONST_INT)
3423 return op0 != const0_rtx ? op1 : op2;
3425 /* Convert c ? a : a into "a". */
3426 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3427 return op1;
3429 /* Convert a != b ? a : b into "a". */
3430 if (GET_CODE (op0) == NE
3431 && ! side_effects_p (op0)
3432 && ! HONOR_NANS (mode)
3433 && ! HONOR_SIGNED_ZEROS (mode)
3434 && ((rtx_equal_p (XEXP (op0, 0), op1)
3435 && rtx_equal_p (XEXP (op0, 1), op2))
3436 || (rtx_equal_p (XEXP (op0, 0), op2)
3437 && rtx_equal_p (XEXP (op0, 1), op1))))
3438 return op1;
3440 /* Convert a == b ? a : b into "b". */
3441 if (GET_CODE (op0) == EQ
3442 && ! side_effects_p (op0)
3443 && ! HONOR_NANS (mode)
3444 && ! HONOR_SIGNED_ZEROS (mode)
3445 && ((rtx_equal_p (XEXP (op0, 0), op1)
3446 && rtx_equal_p (XEXP (op0, 1), op2))
3447 || (rtx_equal_p (XEXP (op0, 0), op2)
3448 && rtx_equal_p (XEXP (op0, 1), op1))))
3449 return op2;
3451 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3453 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3454 ? GET_MODE (XEXP (op0, 1))
3455 : GET_MODE (XEXP (op0, 0)));
3456 rtx temp;
3458 /* Look for happy constants in op1 and op2. */
3459 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3461 HOST_WIDE_INT t = INTVAL (op1);
3462 HOST_WIDE_INT f = INTVAL (op2);
3464 if (t == STORE_FLAG_VALUE && f == 0)
3465 code = GET_CODE (op0);
3466 else if (t == 0 && f == STORE_FLAG_VALUE)
3468 enum rtx_code tmp;
3469 tmp = reversed_comparison_code (op0, NULL_RTX);
3470 if (tmp == UNKNOWN)
3471 break;
3472 code = tmp;
3474 else
3475 break;
3477 return simplify_gen_relational (code, mode, cmp_mode,
3478 XEXP (op0, 0), XEXP (op0, 1));
3481 if (cmp_mode == VOIDmode)
3482 cmp_mode = op0_mode;
3483 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3484 cmp_mode, XEXP (op0, 0),
3485 XEXP (op0, 1));
3487 /* See if any simplifications were possible. */
3488 if (temp)
3490 if (GET_CODE (temp) == CONST_INT)
3491 return temp == const0_rtx ? op2 : op1;
3492 else if (temp)
3493 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3496 break;
3498 case VEC_MERGE:
3499 gcc_assert (GET_MODE (op0) == mode);
3500 gcc_assert (GET_MODE (op1) == mode);
3501 gcc_assert (VECTOR_MODE_P (mode));
3502 op2 = avoid_constant_pool_reference (op2);
3503 if (GET_CODE (op2) == CONST_INT)
3505 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3506 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3507 int mask = (1 << n_elts) - 1;
3509 if (!(INTVAL (op2) & mask))
3510 return op1;
3511 if ((INTVAL (op2) & mask) == mask)
3512 return op0;
3514 op0 = avoid_constant_pool_reference (op0);
3515 op1 = avoid_constant_pool_reference (op1);
3516 if (GET_CODE (op0) == CONST_VECTOR
3517 && GET_CODE (op1) == CONST_VECTOR)
3519 rtvec v = rtvec_alloc (n_elts);
3520 unsigned int i;
3522 for (i = 0; i < n_elts; i++)
3523 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3524 ? CONST_VECTOR_ELT (op0, i)
3525 : CONST_VECTOR_ELT (op1, i));
3526 return gen_rtx_CONST_VECTOR (mode, v);
3529 break;
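/* Illustrative sketch of the constant VEC_MERGE folding above, with a
   hypothetical helper name and plain C types: element I of the result comes
   from the first vector when bit I of the mask is set and from the second
   vector otherwise.  */
static void
vec_merge_sketch (const int *op0, const int *op1, int *dest,
                  unsigned n_elts, unsigned long long mask)
{
  unsigned i;
  for (i = 0; i < n_elts; i++)
    dest[i] = (mask & ((unsigned long long) 1 << i)) ? op0[i] : op1[i];
}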
3531 default:
3532 gcc_unreachable ();
3535 return 0;
3538 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3539 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3541 Works by unpacking OP into a collection of 8-bit values
3542 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3543 and then repacking them again for OUTERMODE. */
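/* Illustrative sketch of the unpack/repack scheme described above, with
   hypothetical helper names and plain C types: a value is decomposed into a
   little-endian array of 8-bit chunks and later reassembled.  Assumes N is
   at most the number of bytes in the value.  */
static void
unpack_le_bytes_sketch (unsigned long long value, unsigned char *bytes, int n)
{
  int i;
  for (i = 0; i < n; i++)
    bytes[i] = (unsigned char) (value >> (8 * i));
}

static unsigned long long
repack_le_bytes_sketch (const unsigned char *bytes, int n)
{
  unsigned long long value = 0;
  int i;
  for (i = 0; i < n; i++)
    value |= (unsigned long long) bytes[i] << (8 * i);
  return value;
}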
3545 static rtx
3546 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3547 enum machine_mode innermode, unsigned int byte)
3549 /* We support up to 512-bit values (for V8DFmode). */
3550 enum {
3551 max_bitsize = 512,
3552 value_bit = 8,
3553 value_mask = (1 << value_bit) - 1
3555 unsigned char value[max_bitsize / value_bit];
3556 int value_start;
3557 int i;
3558 int elem;
3560 int num_elem;
3561 rtx * elems;
3562 int elem_bitsize;
3563 rtx result_s;
3564 rtvec result_v = NULL;
3565 enum mode_class outer_class;
3566 enum machine_mode outer_submode;
3568 /* Some ports misuse CCmode. */
3569 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3570 return op;
3572 /* We have no way to represent a complex constant at the rtl level. */
3573 if (COMPLEX_MODE_P (outermode))
3574 return NULL_RTX;
3576 /* Unpack the value. */
3578 if (GET_CODE (op) == CONST_VECTOR)
3580 num_elem = CONST_VECTOR_NUNITS (op);
3581 elems = &CONST_VECTOR_ELT (op, 0);
3582 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3584 else
3586 num_elem = 1;
3587 elems = &op;
3588 elem_bitsize = max_bitsize;
3590 /* If this asserts, it is too complicated; reducing value_bit may help. */
3591 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3592 /* I don't know how to handle endianness of sub-units. */
3593 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3595 for (elem = 0; elem < num_elem; elem++)
3597 unsigned char * vp;
3598 rtx el = elems[elem];
3600 /* Vectors are kept in target memory order. (This is probably
3601 a mistake.) */
3603 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3604 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3605 / BITS_PER_UNIT);
3606 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3607 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3608 unsigned bytele = (subword_byte % UNITS_PER_WORD
3609 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3610 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3613 switch (GET_CODE (el))
3615 case CONST_INT:
3616 for (i = 0;
3617 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3618 i += value_bit)
3619 *vp++ = INTVAL (el) >> i;
3620 /* CONST_INTs are always logically sign-extended. */
3621 for (; i < elem_bitsize; i += value_bit)
3622 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3623 break;
3625 case CONST_DOUBLE:
3626 if (GET_MODE (el) == VOIDmode)
3628 /* If this triggers, someone should have generated a
3629 CONST_INT instead. */
3630 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3632 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3633 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3634 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3636 *vp++
3637 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3638 i += value_bit;
3640 /* It shouldn't matter what's done here, so fill it with
3641 zero. */
3642 for (; i < elem_bitsize; i += value_bit)
3643 *vp++ = 0;
3645 else
3647 long tmp[max_bitsize / 32];
3648 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3650 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
3651 gcc_assert (bitsize <= elem_bitsize);
3652 gcc_assert (bitsize % value_bit == 0);
3654 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3655 GET_MODE (el));
3657 /* real_to_target produces its result in words affected by
3658 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3659 and use WORDS_BIG_ENDIAN instead; see the documentation
3660 of SUBREG in rtl.texi. */
3661 for (i = 0; i < bitsize; i += value_bit)
3663 int ibase;
3664 if (WORDS_BIG_ENDIAN)
3665 ibase = bitsize - 1 - i;
3666 else
3667 ibase = i;
3668 *vp++ = tmp[ibase / 32] >> i % 32;
3671 /* It shouldn't matter what's done here, so fill it with
3672 zero. */
3673 for (; i < elem_bitsize; i += value_bit)
3674 *vp++ = 0;
3676 break;
3678 default:
3679 gcc_unreachable ();
3683 /* Now, pick the right byte to start with. */
3684 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3685 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3686 will already have offset 0. */
3687 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3689 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3690 - byte);
3691 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3692 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3693 byte = (subword_byte % UNITS_PER_WORD
3694 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3697 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3698 so if it's become negative it will instead be very large.) */
3699 gcc_assert (byte < GET_MODE_SIZE (innermode));
3701 /* Convert from bytes to chunks of size value_bit. */
3702 value_start = byte * (BITS_PER_UNIT / value_bit);
3704 /* Re-pack the value. */
3706 if (VECTOR_MODE_P (outermode))
3708 num_elem = GET_MODE_NUNITS (outermode);
3709 result_v = rtvec_alloc (num_elem);
3710 elems = &RTVEC_ELT (result_v, 0);
3711 outer_submode = GET_MODE_INNER (outermode);
3713 else
3715 num_elem = 1;
3716 elems = &result_s;
3717 outer_submode = outermode;
3720 outer_class = GET_MODE_CLASS (outer_submode);
3721 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3723 gcc_assert (elem_bitsize % value_bit == 0);
3724 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3726 for (elem = 0; elem < num_elem; elem++)
3728 unsigned char *vp;
3730 /* Vectors are stored in target memory order. (This is probably
3731 a mistake.) */
3733 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3734 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3735 / BITS_PER_UNIT);
3736 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3737 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3738 unsigned bytele = (subword_byte % UNITS_PER_WORD
3739 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3740 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3743 switch (outer_class)
3745 case MODE_INT:
3746 case MODE_PARTIAL_INT:
3748 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3750 for (i = 0;
3751 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3752 i += value_bit)
3753 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3754 for (; i < elem_bitsize; i += value_bit)
3755 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3756 << (i - HOST_BITS_PER_WIDE_INT));
3758 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3759 know why. */
3760 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3761 elems[elem] = gen_int_mode (lo, outer_submode);
3762 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
3763 elems[elem] = immed_double_const (lo, hi, outer_submode);
3764 else
3765 return NULL_RTX;
3767 break;
3769 case MODE_FLOAT:
3771 REAL_VALUE_TYPE r;
3772 long tmp[max_bitsize / 32];
3774 /* real_from_target wants its input in words affected by
3775 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3776 and use WORDS_BIG_ENDIAN instead; see the documentation
3777 of SUBREG in rtl.texi. */
3778 for (i = 0; i < max_bitsize / 32; i++)
3779 tmp[i] = 0;
3780 for (i = 0; i < elem_bitsize; i += value_bit)
3782 int ibase;
3783 if (WORDS_BIG_ENDIAN)
3784 ibase = elem_bitsize - 1 - i;
3785 else
3786 ibase = i;
3787 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3790 real_from_target (&r, tmp, outer_submode);
3791 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3793 break;
3795 default:
3796 gcc_unreachable ();
3799 if (VECTOR_MODE_P (outermode))
3800 return gen_rtx_CONST_VECTOR (outermode, result_v);
3801 else
3802 return result_s;
3805 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3806 Return 0 if no simplifications are possible. */
3808 simplify_subreg (enum machine_mode outermode, rtx op,
3809 enum machine_mode innermode, unsigned int byte)
3811 /* Little bit of sanity checking. */
3812 gcc_assert (innermode != VOIDmode);
3813 gcc_assert (outermode != VOIDmode);
3814 gcc_assert (innermode != BLKmode);
3815 gcc_assert (outermode != BLKmode);
3817 gcc_assert (GET_MODE (op) == innermode
3818 || GET_MODE (op) == VOIDmode);
3820 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3821 gcc_assert (byte < GET_MODE_SIZE (innermode));
3823 if (outermode == innermode && !byte)
3824 return op;
3826 if (GET_CODE (op) == CONST_INT
3827 || GET_CODE (op) == CONST_DOUBLE
3828 || GET_CODE (op) == CONST_VECTOR)
3829 return simplify_immed_subreg (outermode, op, innermode, byte);
3831 /* Changing mode twice with SUBREG => just change it once,
3832 or not at all if changing back to the starting mode. */
3833 if (GET_CODE (op) == SUBREG)
3835 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3836 int final_offset = byte + SUBREG_BYTE (op);
3837 rtx newx;
3839 if (outermode == innermostmode
3840 && byte == 0 && SUBREG_BYTE (op) == 0)
3841 return SUBREG_REG (op);
3843 /* The SUBREG_BYTE represents the offset, as if the value were stored
3844 in memory. An irritating exception is a paradoxical subreg, where
3845 we define SUBREG_BYTE to be 0. On big-endian machines, this
3846 value should be negative. For a moment, undo this exception. */
3847 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3849 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3850 if (WORDS_BIG_ENDIAN)
3851 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3852 if (BYTES_BIG_ENDIAN)
3853 final_offset += difference % UNITS_PER_WORD;
3855 if (SUBREG_BYTE (op) == 0
3856 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3858 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3859 if (WORDS_BIG_ENDIAN)
3860 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3861 if (BYTES_BIG_ENDIAN)
3862 final_offset += difference % UNITS_PER_WORD;
3865 /* See whether resulting subreg will be paradoxical. */
3866 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3868 /* In nonparadoxical subregs we can't handle negative offsets. */
3869 if (final_offset < 0)
3870 return NULL_RTX;
3871 /* Bail out in case resulting subreg would be incorrect. */
3872 if (final_offset % GET_MODE_SIZE (outermode)
3873 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3874 return NULL_RTX;
3876 else
3878 int offset = 0;
3879 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3881 /* In a paradoxical subreg, see if we are still looking at the lower part.
3882 If so, our SUBREG_BYTE will be 0. */
3883 if (WORDS_BIG_ENDIAN)
3884 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3885 if (BYTES_BIG_ENDIAN)
3886 offset += difference % UNITS_PER_WORD;
3887 if (offset == final_offset)
3888 final_offset = 0;
3889 else
3890 return NULL_RTX;
3893 /* Recurse for further possible simplifications. */
3894 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3895 final_offset);
3896 if (newx)
3897 return newx;
3898 if (validate_subreg (outermode, innermostmode,
3899 SUBREG_REG (op), final_offset))
3900 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3901 return NULL_RTX;
3904 /* SUBREG of a hard register => just change the register number
3905 and/or mode. If the hard register is not valid in that mode,
3906 suppress this simplification. If the hard register is the stack,
3907 frame, or argument pointer, leave this as a SUBREG. */
3909 if (REG_P (op)
3910 && REGNO (op) < FIRST_PSEUDO_REGISTER
3911 #ifdef CANNOT_CHANGE_MODE_CLASS
3912 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3913 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3914 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3915 #endif
3916 && ((reload_completed && !frame_pointer_needed)
3917 || (REGNO (op) != FRAME_POINTER_REGNUM
3918 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3919 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3920 #endif
3922 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3923 && REGNO (op) != ARG_POINTER_REGNUM
3924 #endif
3925 && REGNO (op) != STACK_POINTER_REGNUM
3926 && subreg_offset_representable_p (REGNO (op), innermode,
3927 byte, outermode))
3929 unsigned int regno = REGNO (op);
3930 unsigned int final_regno
3931 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3933 /* ??? We do allow it if the current REG is not valid for
3934 its mode. This is a kludge to work around how float/complex
3935 arguments are passed on 32-bit SPARC and should be fixed. */
3936 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3937 || ! HARD_REGNO_MODE_OK (regno, innermode))
3939 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3941 /* Propagate original regno. We don't have any way to specify
3942 the offset inside original regno, so do so only for lowpart.
3943 The information is used only by alias analysis, which cannot
3944 grok partial registers anyway. */
3946 if (subreg_lowpart_offset (outermode, innermode) == byte)
3947 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3948 return x;
3952 /* If we have a SUBREG of a register that we are replacing and we are
3953 replacing it with a MEM, make a new MEM and try replacing the
3954 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3955 or if we would be widening it. */
3957 if (MEM_P (op)
3958 && ! mode_dependent_address_p (XEXP (op, 0))
3959 /* Allow splitting of volatile memory references in case we don't
3960 have an instruction to move the whole thing. */
3961 && (! MEM_VOLATILE_P (op)
3962 || ! have_insn_for (SET, innermode))
3963 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3964 return adjust_address_nv (op, outermode, byte);
3966 /* Handle complex values represented as CONCAT
3967 of real and imaginary part. */
3968 if (GET_CODE (op) == CONCAT)
3970 unsigned int inner_size, final_offset;
3971 rtx part, res;
3973 inner_size = GET_MODE_UNIT_SIZE (innermode);
3974 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3975 final_offset = byte % inner_size;
3976 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3977 return NULL_RTX;
3979 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3980 if (res)
3981 return res;
3982 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3983 return gen_rtx_SUBREG (outermode, part, final_offset);
3984 return NULL_RTX;
3987 /* Optimize SUBREG truncations of zero and sign extended values. */
3988 if ((GET_CODE (op) == ZERO_EXTEND
3989 || GET_CODE (op) == SIGN_EXTEND)
3990 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3992 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3994 /* If we're requesting the lowpart of a zero or sign extension,
3995 there are three possibilities. If the outermode is the same
3996 as the origmode, we can omit both the extension and the subreg.
3997 If the outermode is not larger than the origmode, we can apply
3998 the truncation without the extension. Finally, if the outermode
3999 is larger than the origmode, but both are integer modes, we
4000 can just extend to the appropriate mode. */
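/* For example, assuming a little-endian target so that byte 0 is the
   lowpart: (subreg:SI (zero_extend:DI (reg:SI X)) 0) folds to
   (reg:SI X); (subreg:HI (zero_extend:DI (reg:SI X)) 0) becomes the
   lowpart subreg of (reg:SI X); and
   (subreg:SI (zero_extend:DI (reg:HI X)) 0) becomes
   (zero_extend:SI (reg:HI X)).  */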
4001 if (bitpos == 0)
4003 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4004 if (outermode == origmode)
4005 return XEXP (op, 0);
4006 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4007 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4008 subreg_lowpart_offset (outermode,
4009 origmode));
4010 if (SCALAR_INT_MODE_P (outermode))
4011 return simplify_gen_unary (GET_CODE (op), outermode,
4012 XEXP (op, 0), origmode);
4015 /* A SUBREG resulting from a zero extension may fold to zero if
4016 it extracts higher bits than the ZERO_EXTEND's source provides. */
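/* For example, on a 32-bit little-endian target,
   (subreg:SI (zero_extend:DI (reg:SI X)) 4) reads only bits supplied
   by the extension itself and folds to (const_int 0).  */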
4017 if (GET_CODE (op) == ZERO_EXTEND
4018 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4019 return CONST0_RTX (outermode);
4022 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4023 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4024 the outer subreg is effectively a truncation to the original mode. */
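/* For example, (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI X))
   (const_int 2)) 0) becomes (ashiftrt:QI (reg:QI X) (const_int 2)):
   the low byte of the wide logical shift sees only sign-bit copies,
   so an arithmetic shift in QImode is equivalent.  */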
4025 if ((GET_CODE (op) == LSHIFTRT
4026 || GET_CODE (op) == ASHIFTRT)
4027 && SCALAR_INT_MODE_P (outermode)
4028 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
4029 to avoid the possibility that an outer LSHIFTRT shifts by more
4030 than the sign extension's sign_bit_copies and introduces zeros
4031 into the high bits of the result. */
4032 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4033 && GET_CODE (XEXP (op, 1)) == CONST_INT
4034 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4035 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4036 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4037 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4038 return simplify_gen_binary (ASHIFTRT, outermode,
4039 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4041 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4042 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4043 the outer subreg is effectively a truncation to the original mode. */
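/* For example, (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI X))
   (const_int 2)) 0) becomes (lshiftrt:QI (reg:QI X) (const_int 2)):
   only zero bits are shifted into the low byte, so the narrower
   logical shift gives the same lowpart.  */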
4044 if ((GET_CODE (op) == LSHIFTRT
4045 || GET_CODE (op) == ASHIFTRT)
4046 && SCALAR_INT_MODE_P (outermode)
4047 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4048 && GET_CODE (XEXP (op, 1)) == CONST_INT
4049 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4050 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4051 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4052 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4053 return simplify_gen_binary (LSHIFTRT, outermode,
4054 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4056 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4057 (ashift:QI (x:QI) C), where C is a suitable small constant and
4058 the outer subreg is effectively a truncation to the original mode. */
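/* For example, (subreg:QI (ashift:SI (zero_extend:SI (reg:QI X))
   (const_int 3)) 0) becomes (ashift:QI (reg:QI X) (const_int 3)):
   the bits shifted into the low byte come only from X itself, so the
   narrower shift yields the same lowpart.  */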
4059 if (GET_CODE (op) == ASHIFT
4060 && SCALAR_INT_MODE_P (outermode)
4061 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4062 && GET_CODE (XEXP (op, 1)) == CONST_INT
4063 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4064 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4065 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4066 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4067 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4068 return simplify_gen_binary (ASHIFT, outermode,
4069 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4071 return NULL_RTX;
4074 /* Make a SUBREG operation or equivalent if it folds. */
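/* For example, simplify_gen_subreg (SImode, X, DImode, 0) returns a
   simplified rtx for the SImode piece of X at byte 0 when one exists,
   a fresh (subreg:SI X 0) when the subreg is merely valid, and
   NULL_RTX otherwise (for instance when X is itself a SUBREG or
   CONCAT that did not fold).  */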
4076 rtx
4077 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4078 enum machine_mode innermode, unsigned int byte)
4080 rtx newx;
4082 newx = simplify_subreg (outermode, op, innermode, byte);
4083 if (newx)
4084 return newx;
4086 if (GET_CODE (op) == SUBREG
4087 || GET_CODE (op) == CONCAT
4088 || GET_MODE (op) == VOIDmode)
4089 return NULL_RTX;
4091 if (validate_subreg (outermode, innermode, op, byte))
4092 return gen_rtx_SUBREG (outermode, op, byte);
4094 return NULL_RTX;
4097 /* Simplify X, an rtx expression.
4099 Return the simplified expression or NULL if no simplifications
4100 were possible.
4102 This is the preferred entry point into the simplification routines;
4103 however, we still allow passes to call the more specific routines.
4105 Right now GCC has three (yes, three) major bodies of RTL simplification
4106 code that need to be unified.
4108 1. fold_rtx in cse.c. This code uses various CSE specific
4109 information to aid in RTL simplification.
4111 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4112 it uses combine specific information to aid in RTL
4113 simplification.
4115 3. The routines in this file.
4118 Long term we want to only have one body of simplification code; to
4119 get to that state I recommend the following steps:
4121 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4122 that do not depend on pass-specific state into these routines.
4124 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4125 use this routine whenever possible.
4127 3. Allow for pass dependent state to be provided to these
4128 routines and add simplifications based on the pass dependent
4129 state. Remove code from cse.c & combine.c that becomes
4130 redundant/dead.
4132 It will take time, but ultimately the compiler will be easier to
4133 maintain and improve. It's totally silly that when we add a
4134 simplification it needs to be added to 4 places (3 for RTL
4135 simplification and 1 for tree simplification). */
4137 rtx
4138 simplify_rtx (rtx x)
4140 enum rtx_code code = GET_CODE (x);
4141 enum machine_mode mode = GET_MODE (x);
4143 switch (GET_RTX_CLASS (code))
4145 case RTX_UNARY:
4146 return simplify_unary_operation (code, mode,
4147 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4148 case RTX_COMM_ARITH:
4149 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4150 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4152 /* Fall through.... */
4154 case RTX_BIN_ARITH:
4155 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4157 case RTX_TERNARY:
4158 case RTX_BITFIELD_OPS:
4159 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4160 XEXP (x, 0), XEXP (x, 1),
4161 XEXP (x, 2));
4163 case RTX_COMPARE:
4164 case RTX_COMM_COMPARE:
4165 return simplify_relational_operation (code, mode,
4166 ((GET_MODE (XEXP (x, 0))
4167 != VOIDmode)
4168 ? GET_MODE (XEXP (x, 0))
4169 : GET_MODE (XEXP (x, 1))),
4170 XEXP (x, 0),
4171 XEXP (x, 1));
4173 case RTX_EXTRA:
4174 if (code == SUBREG)
4175 return simplify_gen_subreg (mode, SUBREG_REG (x),
4176 GET_MODE (SUBREG_REG (x)),
4177 SUBREG_BYTE (x));
4178 break;
4180 case RTX_OBJ:
4181 if (code == LO_SUM)
4183 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4184 if (GET_CODE (XEXP (x, 0)) == HIGH
4185 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4186 return XEXP (x, 1);
4188 break;
4190 default:
4191 break;
4193 return NULL;
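/* For example, simplify_rtx applied to (plus:SI (reg:SI X) (const_int 0))
   dispatches to simplify_binary_operation and yields (reg:SI X), while
   (subreg:SI (reg:DI Y) 0) goes through simplify_gen_subreg above.
   A NULL return simply means that no simplification was found.  */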