* gimplify.c (find_single_pointer_decl_1): New static function.
[official-gcc.git] / gcc / simplify-rtx.c
blob44a1660e6882477101a18beb76058ed2924f20b1
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
72 return gen_int_mode (- INTVAL (i), mode);
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
101 else
102 return false;
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
116 rtx tem;
118 /* Put complex operands first and constants second if commutative. */
119 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
120 && swap_commutative_operands_p (op0, op1))
121 tem = op0, op0 = op1, op1 = tem;
123 /* If this simplifies, do it. */
124 tem = simplify_binary_operation (code, mode, op0, op1);
125 if (tem)
126 return tem;
128 /* Handle addition and subtraction specially. Otherwise, just form
129 the operation. */
131 if (code == PLUS || code == MINUS)
133 tem = simplify_plus_minus (code, mode, op0, op1, 1);
134 if (tem)
135 return tem;
138 return gen_rtx_fmt_ee (code, mode, op0, op1);
141 /* If X is a MEM referencing the constant pool, return the real value.
142 Otherwise return X. */
144 avoid_constant_pool_reference (rtx x)
146 rtx c, tmp, addr;
147 enum machine_mode cmode;
148 HOST_WIDE_INT offset = 0;
150 switch (GET_CODE (x))
152 case MEM:
153 break;
155 case FLOAT_EXTEND:
156 /* Handle float extensions of constant pool references. */
157 tmp = XEXP (x, 0);
158 c = avoid_constant_pool_reference (tmp);
159 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
161 REAL_VALUE_TYPE d;
163 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
164 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
166 return x;
168 default:
169 return x;
172 addr = XEXP (x, 0);
174 /* Call target hook to avoid the effects of -fpic etc.... */
175 addr = targetm.delegitimize_address (addr);
177 /* Split the address into a base and integer offset. */
178 if (GET_CODE (addr) == CONST
179 && GET_CODE (XEXP (addr, 0)) == PLUS
180 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
182 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
183 addr = XEXP (XEXP (addr, 0), 0);
186 if (GET_CODE (addr) == LO_SUM)
187 addr = XEXP (addr, 1);
189 /* If this is a constant pool reference, we can turn it into its
190 constant and hope that simplifications happen. */
191 if (GET_CODE (addr) == SYMBOL_REF
192 && CONSTANT_POOL_ADDRESS_P (addr))
194 c = get_pool_constant (addr);
195 cmode = get_pool_mode (addr);
197 /* If we're accessing the constant in a different mode than it was
198 originally stored, attempt to fix that up via subreg simplifications.
199 If that fails we have no choice but to return the original memory. */
200 if (offset != 0 || cmode != GET_MODE (x))
202 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
203 if (tem && CONSTANT_P (tem))
204 return tem;
206 else
207 return c;
210 return x;
213 /* Return true if X is a MEM referencing the constant pool. */
215 bool
216 constant_pool_reference_p (rtx x)
218 return avoid_constant_pool_reference (x) != x;
221 /* Make a unary operation by first seeing if it folds and otherwise making
222 the specified operation. */
225 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
226 enum machine_mode op_mode)
228 rtx tem;
230 /* If this simplifies, use it. */
231 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
232 return tem;
234 return gen_rtx_fmt_e (code, mode, op);
237 /* Likewise for ternary operations. */
240 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
241 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
243 rtx tem;
245 /* If this simplifies, use it. */
246 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
247 op0, op1, op2)))
248 return tem;
250 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
253 /* Likewise, for relational operations.
254 CMP_MODE specifies mode comparison is done in. */
257 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
258 enum machine_mode cmp_mode, rtx op0, rtx op1)
260 rtx tem;
262 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
263 op0, op1)))
264 return tem;
266 return gen_rtx_fmt_ee (code, mode, op0, op1);
269 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
270 resulting RTX. Return a new RTX which is as simplified as possible. */
273 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
275 enum rtx_code code = GET_CODE (x);
276 enum machine_mode mode = GET_MODE (x);
277 enum machine_mode op_mode;
278 rtx op0, op1, op2;
280 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
281 to build a new expression substituting recursively. If we can't do
282 anything, return our input. */
284 if (x == old_rtx)
285 return new_rtx;
287 switch (GET_RTX_CLASS (code))
289 case RTX_UNARY:
290 op0 = XEXP (x, 0);
291 op_mode = GET_MODE (op0);
292 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
293 if (op0 == XEXP (x, 0))
294 return x;
295 return simplify_gen_unary (code, mode, op0, op_mode);
297 case RTX_BIN_ARITH:
298 case RTX_COMM_ARITH:
299 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
302 return x;
303 return simplify_gen_binary (code, mode, op0, op1);
305 case RTX_COMPARE:
306 case RTX_COMM_COMPARE:
307 op0 = XEXP (x, 0);
308 op1 = XEXP (x, 1);
309 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
310 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
311 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
313 return x;
314 return simplify_gen_relational (code, mode, op_mode, op0, op1);
316 case RTX_TERNARY:
317 case RTX_BITFIELD_OPS:
318 op0 = XEXP (x, 0);
319 op_mode = GET_MODE (op0);
320 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
321 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
322 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
323 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
324 return x;
325 if (op_mode == VOIDmode)
326 op_mode = GET_MODE (op0);
327 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
329 case RTX_EXTRA:
330 /* The only case we try to handle is a SUBREG. */
331 if (code == SUBREG)
333 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
334 if (op0 == SUBREG_REG (x))
335 return x;
336 op0 = simplify_gen_subreg (GET_MODE (x), op0,
337 GET_MODE (SUBREG_REG (x)),
338 SUBREG_BYTE (x));
339 return op0 ? op0 : x;
341 break;
343 case RTX_OBJ:
344 if (code == MEM)
346 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
347 if (op0 == XEXP (x, 0))
348 return x;
349 return replace_equiv_address_nv (x, op0);
351 else if (code == LO_SUM)
353 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
354 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
356 /* (lo_sum (high x) x) -> x */
357 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
358 return op1;
360 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
361 return x;
362 return gen_rtx_LO_SUM (mode, op0, op1);
364 else if (code == REG)
366 if (rtx_equal_p (x, old_rtx))
367 return new_rtx;
369 break;
371 default:
372 break;
374 return x;
377 /* Try to simplify a unary operation CODE whose output mode is to be
378 MODE with input operand OP whose mode was originally OP_MODE.
379 Return zero if no simplification can be made. */
381 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
382 rtx op, enum machine_mode op_mode)
384 rtx trueop, tem;
386 if (GET_CODE (op) == CONST)
387 op = XEXP (op, 0);
389 trueop = avoid_constant_pool_reference (op);
391 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
392 if (tem)
393 return tem;
395 return simplify_unary_operation_1 (code, mode, op);
398 /* Perform some simplifications we can do even if the operands
399 aren't constant. */
400 static rtx
401 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
403 enum rtx_code reversed;
404 rtx temp;
406 switch (code)
408 case NOT:
409 /* (not (not X)) == X. */
410 if (GET_CODE (op) == NOT)
411 return XEXP (op, 0);
413 /* (not (eq X Y)) == (ne X Y), etc. */
414 if (COMPARISON_P (op)
415 && (mode == BImode || STORE_FLAG_VALUE == -1)
416 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
417 return simplify_gen_relational (reversed, mode, VOIDmode,
418 XEXP (op, 0), XEXP (op, 1));
420 /* (not (plus X -1)) can become (neg X). */
421 if (GET_CODE (op) == PLUS
422 && XEXP (op, 1) == constm1_rtx)
423 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
425 /* Similarly, (not (neg X)) is (plus X -1). */
426 if (GET_CODE (op) == NEG)
427 return plus_constant (XEXP (op, 0), -1);
429 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
430 if (GET_CODE (op) == XOR
431 && GET_CODE (XEXP (op, 1)) == CONST_INT
432 && (temp = simplify_unary_operation (NOT, mode,
433 XEXP (op, 1), mode)) != 0)
434 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
436 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
437 if (GET_CODE (op) == PLUS
438 && GET_CODE (XEXP (op, 1)) == CONST_INT
439 && mode_signbit_p (mode, XEXP (op, 1))
440 && (temp = simplify_unary_operation (NOT, mode,
441 XEXP (op, 1), mode)) != 0)
442 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
445 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
446 operands other than 1, but that is not valid. We could do a
447 similar simplification for (not (lshiftrt C X)) where C is
448 just the sign bit, but this doesn't seem common enough to
449 bother with. */
450 if (GET_CODE (op) == ASHIFT
451 && XEXP (op, 0) == const1_rtx)
453 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
454 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
457 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
458 by reversing the comparison code if valid. */
459 if (STORE_FLAG_VALUE == -1
460 && COMPARISON_P (op)
461 && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
462 return simplify_gen_relational (reversed, mode, VOIDmode,
463 XEXP (op, 0), XEXP (op, 1));
465 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
466 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
467 so we can perform the above simplification. */
469 if (STORE_FLAG_VALUE == -1
470 && GET_CODE (op) == ASHIFTRT
471 && GET_CODE (XEXP (op, 1)) == CONST_INT
472 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
473 return simplify_gen_relational (GE, mode, VOIDmode,
474 XEXP (op, 0), const0_rtx);
476 break;
478 case NEG:
479 /* (neg (neg X)) == X. */
480 if (GET_CODE (op) == NEG)
481 return XEXP (op, 0);
483 /* (neg (plus X 1)) can become (not X). */
484 if (GET_CODE (op) == PLUS
485 && XEXP (op, 1) == const1_rtx)
486 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
488 /* Similarly, (neg (not X)) is (plus X 1). */
489 if (GET_CODE (op) == NOT)
490 return plus_constant (XEXP (op, 0), 1);
492 /* (neg (minus X Y)) can become (minus Y X). This transformation
493 isn't safe for modes with signed zeros, since if X and Y are
494 both +0, (minus Y X) is the same as (minus X Y). If the
495 rounding mode is towards +infinity (or -infinity) then the two
496 expressions will be rounded differently. */
497 if (GET_CODE (op) == MINUS
498 && !HONOR_SIGNED_ZEROS (mode)
499 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
500 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
502 if (GET_CODE (op) == PLUS
503 && !HONOR_SIGNED_ZEROS (mode)
504 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
506 /* (neg (plus A C)) is simplified to (minus -C A). */
507 if (GET_CODE (XEXP (op, 1)) == CONST_INT
508 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
510 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
511 if (temp)
512 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
515 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
516 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
517 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
520 /* (neg (mult A B)) becomes (mult (neg A) B).
521 This works even for floating-point values. */
522 if (GET_CODE (op) == MULT
523 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
525 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
526 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
529 /* NEG commutes with ASHIFT since it is multiplication. Only do
530 this if we can then eliminate the NEG (e.g., if the operand
531 is a constant). */
532 if (GET_CODE (op) == ASHIFT)
534 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
535 if (temp)
536 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
539 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
540 C is equal to the width of MODE minus 1. */
541 if (GET_CODE (op) == ASHIFTRT
542 && GET_CODE (XEXP (op, 1)) == CONST_INT
543 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
544 return simplify_gen_binary (LSHIFTRT, mode,
545 XEXP (op, 0), XEXP (op, 1));
547 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
548 C is equal to the width of MODE minus 1. */
549 if (GET_CODE (op) == LSHIFTRT
550 && GET_CODE (XEXP (op, 1)) == CONST_INT
551 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
552 return simplify_gen_binary (ASHIFTRT, mode,
553 XEXP (op, 0), XEXP (op, 1));
555 break;
557 case SIGN_EXTEND:
558 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
559 becomes just the MINUS if its mode is MODE. This allows
560 folding switch statements on machines using casesi (such as
561 the VAX). */
562 if (GET_CODE (op) == TRUNCATE
563 && GET_MODE (XEXP (op, 0)) == mode
564 && GET_CODE (XEXP (op, 0)) == MINUS
565 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
566 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
567 return XEXP (op, 0);
569 /* Check for a sign extension of a subreg of a promoted
570 variable, where the promotion is sign-extended, and the
571 target mode is the same as the variable's promotion. */
572 if (GET_CODE (op) == SUBREG
573 && SUBREG_PROMOTED_VAR_P (op)
574 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
575 && GET_MODE (XEXP (op, 0)) == mode)
576 return XEXP (op, 0);
578 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
579 if (! POINTERS_EXTEND_UNSIGNED
580 && mode == Pmode && GET_MODE (op) == ptr_mode
581 && (CONSTANT_P (op)
582 || (GET_CODE (op) == SUBREG
583 && REG_P (SUBREG_REG (op))
584 && REG_POINTER (SUBREG_REG (op))
585 && GET_MODE (SUBREG_REG (op)) == Pmode)))
586 return convert_memory_address (Pmode, op);
587 #endif
588 break;
590 case ZERO_EXTEND:
591 /* Check for a zero extension of a subreg of a promoted
592 variable, where the promotion is zero-extended, and the
593 target mode is the same as the variable's promotion. */
594 if (GET_CODE (op) == SUBREG
595 && SUBREG_PROMOTED_VAR_P (op)
596 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
597 && GET_MODE (XEXP (op, 0)) == mode)
598 return XEXP (op, 0);
600 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
601 if (POINTERS_EXTEND_UNSIGNED > 0
602 && mode == Pmode && GET_MODE (op) == ptr_mode
603 && (CONSTANT_P (op)
604 || (GET_CODE (op) == SUBREG
605 && REG_P (SUBREG_REG (op))
606 && REG_POINTER (SUBREG_REG (op))
607 && GET_MODE (SUBREG_REG (op)) == Pmode)))
608 return convert_memory_address (Pmode, op);
609 #endif
610 break;
612 default:
613 break;
616 return 0;
619 /* Try to compute the value of a unary operation CODE whose output mode is to
620 be MODE with input operand OP whose mode was originally OP_MODE.
621 Return zero if the value cannot be computed. */
623 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
624 rtx op, enum machine_mode op_mode)
626 unsigned int width = GET_MODE_BITSIZE (mode);
628 if (code == VEC_DUPLICATE)
630 gcc_assert (VECTOR_MODE_P (mode));
631 if (GET_MODE (op) != VOIDmode)
633 if (!VECTOR_MODE_P (GET_MODE (op)))
634 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
635 else
636 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
637 (GET_MODE (op)));
639 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
640 || GET_CODE (op) == CONST_VECTOR)
642 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
643 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
644 rtvec v = rtvec_alloc (n_elts);
645 unsigned int i;
647 if (GET_CODE (op) != CONST_VECTOR)
648 for (i = 0; i < n_elts; i++)
649 RTVEC_ELT (v, i) = op;
650 else
652 enum machine_mode inmode = GET_MODE (op);
653 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
654 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
656 gcc_assert (in_n_elts < n_elts);
657 gcc_assert ((n_elts % in_n_elts) == 0);
658 for (i = 0; i < n_elts; i++)
659 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
661 return gen_rtx_CONST_VECTOR (mode, v);
665 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
667 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
668 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
669 enum machine_mode opmode = GET_MODE (op);
670 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
671 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
672 rtvec v = rtvec_alloc (n_elts);
673 unsigned int i;
675 gcc_assert (op_n_elts == n_elts);
676 for (i = 0; i < n_elts; i++)
678 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
679 CONST_VECTOR_ELT (op, i),
680 GET_MODE_INNER (opmode));
681 if (!x)
682 return 0;
683 RTVEC_ELT (v, i) = x;
685 return gen_rtx_CONST_VECTOR (mode, v);
688 /* The order of these tests is critical so that, for example, we don't
689 check the wrong mode (input vs. output) for a conversion operation,
690 such as FIX. At some point, this should be simplified. */
692 if (code == FLOAT && GET_MODE (op) == VOIDmode
693 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
695 HOST_WIDE_INT hv, lv;
696 REAL_VALUE_TYPE d;
698 if (GET_CODE (op) == CONST_INT)
699 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
700 else
701 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
703 REAL_VALUE_FROM_INT (d, lv, hv, mode);
704 d = real_value_truncate (mode, d);
705 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
707 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
708 && (GET_CODE (op) == CONST_DOUBLE
709 || GET_CODE (op) == CONST_INT))
711 HOST_WIDE_INT hv, lv;
712 REAL_VALUE_TYPE d;
714 if (GET_CODE (op) == CONST_INT)
715 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
716 else
717 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
719 if (op_mode == VOIDmode)
721 /* We don't know how to interpret negative-looking numbers in
722 this case, so don't try to fold those. */
723 if (hv < 0)
724 return 0;
726 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
728 else
729 hv = 0, lv &= GET_MODE_MASK (op_mode);
731 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
732 d = real_value_truncate (mode, d);
733 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
736 if (GET_CODE (op) == CONST_INT
737 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
739 HOST_WIDE_INT arg0 = INTVAL (op);
740 HOST_WIDE_INT val;
742 switch (code)
744 case NOT:
745 val = ~ arg0;
746 break;
748 case NEG:
749 val = - arg0;
750 break;
752 case ABS:
753 val = (arg0 >= 0 ? arg0 : - arg0);
754 break;
756 case FFS:
757 /* Don't use ffs here. Instead, get low order bit and then its
758 number. If arg0 is zero, this will return 0, as desired. */
759 arg0 &= GET_MODE_MASK (mode);
760 val = exact_log2 (arg0 & (- arg0)) + 1;
761 break;
763 case CLZ:
764 arg0 &= GET_MODE_MASK (mode);
765 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
767 else
768 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
769 break;
771 case CTZ:
772 arg0 &= GET_MODE_MASK (mode);
773 if (arg0 == 0)
775 /* Even if the value at zero is undefined, we have to come
776 up with some replacement. Seems good enough. */
777 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
778 val = GET_MODE_BITSIZE (mode);
780 else
781 val = exact_log2 (arg0 & -arg0);
782 break;
784 case POPCOUNT:
785 arg0 &= GET_MODE_MASK (mode);
786 val = 0;
787 while (arg0)
788 val++, arg0 &= arg0 - 1;
789 break;
791 case PARITY:
792 arg0 &= GET_MODE_MASK (mode);
793 val = 0;
794 while (arg0)
795 val++, arg0 &= arg0 - 1;
796 val &= 1;
797 break;
799 case TRUNCATE:
800 val = arg0;
801 break;
803 case ZERO_EXTEND:
804 /* When zero-extending a CONST_INT, we need to know its
805 original mode. */
806 gcc_assert (op_mode != VOIDmode);
807 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
809 /* If we were really extending the mode,
810 we would have to distinguish between zero-extension
811 and sign-extension. */
812 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
813 val = arg0;
815 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
816 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
817 else
818 return 0;
819 break;
821 case SIGN_EXTEND:
822 if (op_mode == VOIDmode)
823 op_mode = mode;
824 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
826 /* If we were really extending the mode,
827 we would have to distinguish between zero-extension
828 and sign-extension. */
829 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
830 val = arg0;
832 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
835 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
836 if (val
837 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
838 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
840 else
841 return 0;
842 break;
844 case SQRT:
845 case FLOAT_EXTEND:
846 case FLOAT_TRUNCATE:
847 case SS_TRUNCATE:
848 case US_TRUNCATE:
849 return 0;
851 default:
852 gcc_unreachable ();
855 return gen_int_mode (val, mode);
858 /* We can do some operations on integer CONST_DOUBLEs. Also allow
859 for a DImode operation on a CONST_INT. */
860 else if (GET_MODE (op) == VOIDmode
861 && width <= HOST_BITS_PER_WIDE_INT * 2
862 && (GET_CODE (op) == CONST_DOUBLE
863 || GET_CODE (op) == CONST_INT))
865 unsigned HOST_WIDE_INT l1, lv;
866 HOST_WIDE_INT h1, hv;
868 if (GET_CODE (op) == CONST_DOUBLE)
869 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
870 else
871 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
873 switch (code)
875 case NOT:
876 lv = ~ l1;
877 hv = ~ h1;
878 break;
880 case NEG:
881 neg_double (l1, h1, &lv, &hv);
882 break;
884 case ABS:
885 if (h1 < 0)
886 neg_double (l1, h1, &lv, &hv);
887 else
888 lv = l1, hv = h1;
889 break;
891 case FFS:
892 hv = 0;
893 if (l1 == 0)
895 if (h1 == 0)
896 lv = 0;
897 else
898 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
900 else
901 lv = exact_log2 (l1 & -l1) + 1;
902 break;
904 case CLZ:
905 hv = 0;
906 if (h1 != 0)
907 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
908 - HOST_BITS_PER_WIDE_INT;
909 else if (l1 != 0)
910 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
911 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
912 lv = GET_MODE_BITSIZE (mode);
913 break;
915 case CTZ:
916 hv = 0;
917 if (l1 != 0)
918 lv = exact_log2 (l1 & -l1);
919 else if (h1 != 0)
920 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
921 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
922 lv = GET_MODE_BITSIZE (mode);
923 break;
925 case POPCOUNT:
926 hv = 0;
927 lv = 0;
928 while (l1)
929 lv++, l1 &= l1 - 1;
930 while (h1)
931 lv++, h1 &= h1 - 1;
932 break;
934 case PARITY:
935 hv = 0;
936 lv = 0;
937 while (l1)
938 lv++, l1 &= l1 - 1;
939 while (h1)
940 lv++, h1 &= h1 - 1;
941 lv &= 1;
942 break;
944 case TRUNCATE:
945 /* This is just a change-of-mode, so do nothing. */
946 lv = l1, hv = h1;
947 break;
949 case ZERO_EXTEND:
950 gcc_assert (op_mode != VOIDmode);
952 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
953 return 0;
955 hv = 0;
956 lv = l1 & GET_MODE_MASK (op_mode);
957 break;
959 case SIGN_EXTEND:
960 if (op_mode == VOIDmode
961 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
962 return 0;
963 else
965 lv = l1 & GET_MODE_MASK (op_mode);
966 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
967 && (lv & ((HOST_WIDE_INT) 1
968 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
969 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
971 hv = HWI_SIGN_EXTEND (lv);
973 break;
975 case SQRT:
976 return 0;
978 default:
979 return 0;
982 return immed_double_const (lv, hv, mode);
985 else if (GET_CODE (op) == CONST_DOUBLE
986 && GET_MODE_CLASS (mode) == MODE_FLOAT)
988 REAL_VALUE_TYPE d, t;
989 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
991 switch (code)
993 case SQRT:
994 if (HONOR_SNANS (mode) && real_isnan (&d))
995 return 0;
996 real_sqrt (&t, mode, &d);
997 d = t;
998 break;
999 case ABS:
1000 d = REAL_VALUE_ABS (d);
1001 break;
1002 case NEG:
1003 d = REAL_VALUE_NEGATE (d);
1004 break;
1005 case FLOAT_TRUNCATE:
1006 d = real_value_truncate (mode, d);
1007 break;
1008 case FLOAT_EXTEND:
1009 /* All this does is change the mode. */
1010 break;
1011 case FIX:
1012 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1013 break;
1014 case NOT:
1016 long tmp[4];
1017 int i;
1019 real_to_target (tmp, &d, GET_MODE (op));
1020 for (i = 0; i < 4; i++)
1021 tmp[i] = ~tmp[i];
1022 real_from_target (&d, tmp, mode);
1023 break;
1025 default:
1026 gcc_unreachable ();
1028 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1031 else if (GET_CODE (op) == CONST_DOUBLE
1032 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
1033 && GET_MODE_CLASS (mode) == MODE_INT
1034 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1036 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1037 operators are intentionally left unspecified (to ease implementation
1038 by target backends), for consistency, this routine implements the
1039 same semantics for constant folding as used by the middle-end. */
1041 /* This was formerly used only for non-IEEE float.
1042 eggert@twinsun.com says it is safe for IEEE also. */
1043 HOST_WIDE_INT xh, xl, th, tl;
1044 REAL_VALUE_TYPE x, t;
1045 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1046 switch (code)
1048 case FIX:
1049 if (REAL_VALUE_ISNAN (x))
1050 return const0_rtx;
1052 /* Test against the signed upper bound. */
1053 if (width > HOST_BITS_PER_WIDE_INT)
1055 th = ((unsigned HOST_WIDE_INT) 1
1056 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1057 tl = -1;
1059 else
1061 th = 0;
1062 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1064 real_from_integer (&t, VOIDmode, tl, th, 0);
1065 if (REAL_VALUES_LESS (t, x))
1067 xh = th;
1068 xl = tl;
1069 break;
1072 /* Test against the signed lower bound. */
1073 if (width > HOST_BITS_PER_WIDE_INT)
1075 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1076 tl = 0;
1078 else
1080 th = -1;
1081 tl = (HOST_WIDE_INT) -1 << (width - 1);
1083 real_from_integer (&t, VOIDmode, tl, th, 0);
1084 if (REAL_VALUES_LESS (x, t))
1086 xh = th;
1087 xl = tl;
1088 break;
1090 REAL_VALUE_TO_INT (&xl, &xh, x);
1091 break;
1093 case UNSIGNED_FIX:
1094 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1095 return const0_rtx;
1097 /* Test against the unsigned upper bound. */
1098 if (width == 2*HOST_BITS_PER_WIDE_INT)
1100 th = -1;
1101 tl = -1;
1103 else if (width >= HOST_BITS_PER_WIDE_INT)
1105 th = ((unsigned HOST_WIDE_INT) 1
1106 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1107 tl = -1;
1109 else
1111 th = 0;
1112 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1114 real_from_integer (&t, VOIDmode, tl, th, 1);
1115 if (REAL_VALUES_LESS (t, x))
1117 xh = th;
1118 xl = tl;
1119 break;
1122 REAL_VALUE_TO_INT (&xl, &xh, x);
1123 break;
1125 default:
1126 gcc_unreachable ();
1128 return immed_double_const (xl, xh, mode);
1131 return NULL_RTX;
1134 /* Subroutine of simplify_binary_operation to simplify a commutative,
1135 associative binary operation CODE with result mode MODE, operating
1136 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1137 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1138 canonicalization is possible. */
1140 static rtx
1141 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1142 rtx op0, rtx op1)
1144 rtx tem;
1146 /* Linearize the operator to the left. */
1147 if (GET_CODE (op1) == code)
1149 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1150 if (GET_CODE (op0) == code)
1152 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1153 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1156 /* "a op (b op c)" becomes "(b op c) op a". */
1157 if (! swap_commutative_operands_p (op1, op0))
1158 return simplify_gen_binary (code, mode, op1, op0);
1160 tem = op0;
1161 op0 = op1;
1162 op1 = tem;
1165 if (GET_CODE (op0) == code)
1167 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1168 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1170 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1171 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1174 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1175 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1176 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1177 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1178 if (tem != 0)
1179 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1181 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1182 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1183 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1184 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1185 if (tem != 0)
1186 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1189 return 0;
1193 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1194 and OP1. Return 0 if no simplification is possible.
1196 Don't use this for relational operations such as EQ or LT.
1197 Use simplify_relational_operation instead. */
1199 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1200 rtx op0, rtx op1)
1202 rtx trueop0, trueop1;
1203 rtx tem;
1205 /* Relational operations don't work here. We must know the mode
1206 of the operands in order to do the comparison correctly.
1207 Assuming a full word can give incorrect results.
1208 Consider comparing 128 with -128 in QImode. */
1209 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1210 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1212 /* Make sure the constant is second. */
1213 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1214 && swap_commutative_operands_p (op0, op1))
1216 tem = op0, op0 = op1, op1 = tem;
1219 trueop0 = avoid_constant_pool_reference (op0);
1220 trueop1 = avoid_constant_pool_reference (op1);
1222 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1223 if (tem)
1224 return tem;
1225 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1228 static rtx
1229 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1230 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1232 rtx tem;
1233 HOST_WIDE_INT val;
1234 unsigned int width = GET_MODE_BITSIZE (mode);
1236 /* Even if we can't compute a constant result,
1237 there are some cases worth simplifying. */
1239 switch (code)
1241 case PLUS:
1242 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1243 when x is NaN, infinite, or finite and nonzero. They aren't
1244 when x is -0 and the rounding mode is not towards -infinity,
1245 since (-0) + 0 is then 0. */
1246 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1247 return op0;
1249 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1250 transformations are safe even for IEEE. */
1251 if (GET_CODE (op0) == NEG)
1252 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1253 else if (GET_CODE (op1) == NEG)
1254 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1256 /* (~a) + 1 -> -a */
1257 if (INTEGRAL_MODE_P (mode)
1258 && GET_CODE (op0) == NOT
1259 && trueop1 == const1_rtx)
1260 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1262 /* Handle both-operands-constant cases. We can only add
1263 CONST_INTs to constants since the sum of relocatable symbols
1264 can't be handled by most assemblers. Don't add CONST_INT
1265 to CONST_INT since overflow won't be computed properly if wider
1266 than HOST_BITS_PER_WIDE_INT. */
1268 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1269 && GET_CODE (op1) == CONST_INT)
1270 return plus_constant (op0, INTVAL (op1));
1271 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1272 && GET_CODE (op0) == CONST_INT)
1273 return plus_constant (op1, INTVAL (op0));
1275 /* See if this is something like X * C - X or vice versa or
1276 if the multiplication is written as a shift. If so, we can
1277 distribute and make a new multiply, shift, or maybe just
1278 have X (if C is 2 in the example above). But don't make
1279 something more expensive than we had before. */
1281 if (SCALAR_INT_MODE_P (mode))
1283 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1284 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1285 rtx lhs = op0, rhs = op1;
1287 if (GET_CODE (lhs) == NEG)
1289 coeff0l = -1;
1290 coeff0h = -1;
1291 lhs = XEXP (lhs, 0);
1293 else if (GET_CODE (lhs) == MULT
1294 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1296 coeff0l = INTVAL (XEXP (lhs, 1));
1297 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1298 lhs = XEXP (lhs, 0);
1300 else if (GET_CODE (lhs) == ASHIFT
1301 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1302 && INTVAL (XEXP (lhs, 1)) >= 0
1303 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1305 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1306 coeff0h = 0;
1307 lhs = XEXP (lhs, 0);
1310 if (GET_CODE (rhs) == NEG)
1312 coeff1l = -1;
1313 coeff1h = -1;
1314 rhs = XEXP (rhs, 0);
1316 else if (GET_CODE (rhs) == MULT
1317 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1319 coeff1l = INTVAL (XEXP (rhs, 1));
1320 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1321 rhs = XEXP (rhs, 0);
1323 else if (GET_CODE (rhs) == ASHIFT
1324 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1325 && INTVAL (XEXP (rhs, 1)) >= 0
1326 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1328 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1329 coeff1h = 0;
1330 rhs = XEXP (rhs, 0);
1333 if (rtx_equal_p (lhs, rhs))
1335 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1336 rtx coeff;
1337 unsigned HOST_WIDE_INT l;
1338 HOST_WIDE_INT h;
1340 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1341 coeff = immed_double_const (l, h, mode);
1343 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1344 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1345 ? tem : 0;
1349 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1350 if ((GET_CODE (op1) == CONST_INT
1351 || GET_CODE (op1) == CONST_DOUBLE)
1352 && GET_CODE (op0) == XOR
1353 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1354 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1355 && mode_signbit_p (mode, op1))
1356 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1357 simplify_gen_binary (XOR, mode, op1,
1358 XEXP (op0, 1)));
1360 /* If one of the operands is a PLUS or a MINUS, see if we can
1361 simplify this by the associative law.
1362 Don't use the associative law for floating point.
1363 The inaccuracy makes it nonassociative,
1364 and subtle programs can break if operations are associated. */
1366 if (INTEGRAL_MODE_P (mode)
1367 && (plus_minus_operand_p (op0)
1368 || plus_minus_operand_p (op1))
1369 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1370 return tem;
1372 /* Reassociate floating point addition only when the user
1373 specifies unsafe math optimizations. */
1374 if (FLOAT_MODE_P (mode)
1375 && flag_unsafe_math_optimizations)
1377 tem = simplify_associative_operation (code, mode, op0, op1);
1378 if (tem)
1379 return tem;
1381 break;
1383 case COMPARE:
1384 #ifdef HAVE_cc0
1385 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1386 using cc0, in which case we want to leave it as a COMPARE
1387 so we can distinguish it from a register-register-copy.
1389 In IEEE floating point, x-0 is not the same as x. */
1391 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1392 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1393 && trueop1 == CONST0_RTX (mode))
1394 return op0;
1395 #endif
1397 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1398 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1399 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1400 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1402 rtx xop00 = XEXP (op0, 0);
1403 rtx xop10 = XEXP (op1, 0);
1405 #ifdef HAVE_cc0
1406 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1407 #else
1408 if (REG_P (xop00) && REG_P (xop10)
1409 && GET_MODE (xop00) == GET_MODE (xop10)
1410 && REGNO (xop00) == REGNO (xop10)
1411 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1412 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1413 #endif
1414 return xop00;
1416 break;
1418 case MINUS:
1419 /* We can't assume x-x is 0 even with non-IEEE floating point,
1420 but since it is zero except in very strange circumstances, we
1421 will treat it as zero with -funsafe-math-optimizations. */
1422 if (rtx_equal_p (trueop0, trueop1)
1423 && ! side_effects_p (op0)
1424 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1425 return CONST0_RTX (mode);
1427 /* Change subtraction from zero into negation. (0 - x) is the
1428 same as -x when x is NaN, infinite, or finite and nonzero.
1429 But if the mode has signed zeros, and does not round towards
1430 -infinity, then 0 - 0 is 0, not -0. */
1431 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1432 return simplify_gen_unary (NEG, mode, op1, mode);
1434 /* (-1 - a) is ~a. */
1435 if (trueop0 == constm1_rtx)
1436 return simplify_gen_unary (NOT, mode, op1, mode);
1438 /* Subtracting 0 has no effect unless the mode has signed zeros
1439 and supports rounding towards -infinity. In such a case,
1440 0 - 0 is -0. */
1441 if (!(HONOR_SIGNED_ZEROS (mode)
1442 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1443 && trueop1 == CONST0_RTX (mode))
1444 return op0;
1446 /* See if this is something like X * C - X or vice versa or
1447 if the multiplication is written as a shift. If so, we can
1448 distribute and make a new multiply, shift, or maybe just
1449 have X (if C is 2 in the example above). But don't make
1450 something more expensive than we had before. */
1452 if (SCALAR_INT_MODE_P (mode))
1454 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1455 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1456 rtx lhs = op0, rhs = op1;
1458 if (GET_CODE (lhs) == NEG)
1460 coeff0l = -1;
1461 coeff0h = -1;
1462 lhs = XEXP (lhs, 0);
1464 else if (GET_CODE (lhs) == MULT
1465 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1467 coeff0l = INTVAL (XEXP (lhs, 1));
1468 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1469 lhs = XEXP (lhs, 0);
1471 else if (GET_CODE (lhs) == ASHIFT
1472 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1473 && INTVAL (XEXP (lhs, 1)) >= 0
1474 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1476 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1477 coeff0h = 0;
1478 lhs = XEXP (lhs, 0);
1481 if (GET_CODE (rhs) == NEG)
1483 negcoeff1l = 1;
1484 negcoeff1h = 0;
1485 rhs = XEXP (rhs, 0);
1487 else if (GET_CODE (rhs) == MULT
1488 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1490 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1491 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1492 rhs = XEXP (rhs, 0);
1494 else if (GET_CODE (rhs) == ASHIFT
1495 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1496 && INTVAL (XEXP (rhs, 1)) >= 0
1497 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1499 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1500 negcoeff1h = -1;
1501 rhs = XEXP (rhs, 0);
1504 if (rtx_equal_p (lhs, rhs))
1506 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1507 rtx coeff;
1508 unsigned HOST_WIDE_INT l;
1509 HOST_WIDE_INT h;
1511 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1512 coeff = immed_double_const (l, h, mode);
1514 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1515 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1516 ? tem : 0;
1520 /* (a - (-b)) -> (a + b). True even for IEEE. */
1521 if (GET_CODE (op1) == NEG)
1522 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1524 /* (-x - c) may be simplified as (-c - x). */
1525 if (GET_CODE (op0) == NEG
1526 && (GET_CODE (op1) == CONST_INT
1527 || GET_CODE (op1) == CONST_DOUBLE))
1529 tem = simplify_unary_operation (NEG, mode, op1, mode);
1530 if (tem)
1531 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1534 /* If one of the operands is a PLUS or a MINUS, see if we can
1535 simplify this by the associative law.
1536 Don't use the associative law for floating point.
1537 The inaccuracy makes it nonassociative,
1538 and subtle programs can break if operations are associated. */
1540 if (INTEGRAL_MODE_P (mode)
1541 && (plus_minus_operand_p (op0)
1542 || plus_minus_operand_p (op1))
1543 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1544 return tem;
1546 /* Don't let a relocatable value get a negative coeff. */
1547 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1548 return simplify_gen_binary (PLUS, mode,
1549 op0,
1550 neg_const_int (mode, op1));
1552 /* (x - (x & y)) -> (x & ~y) */
1553 if (GET_CODE (op1) == AND)
1555 if (rtx_equal_p (op0, XEXP (op1, 0)))
1557 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1558 GET_MODE (XEXP (op1, 1)));
1559 return simplify_gen_binary (AND, mode, op0, tem);
1561 if (rtx_equal_p (op0, XEXP (op1, 1)))
1563 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1564 GET_MODE (XEXP (op1, 0)));
1565 return simplify_gen_binary (AND, mode, op0, tem);
1568 break;
1570 case MULT:
1571 if (trueop1 == constm1_rtx)
1572 return simplify_gen_unary (NEG, mode, op0, mode);
1574 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1575 x is NaN, since x * 0 is then also NaN. Nor is it valid
1576 when the mode has signed zeros, since multiplying a negative
1577 number by 0 will give -0, not 0. */
1578 if (!HONOR_NANS (mode)
1579 && !HONOR_SIGNED_ZEROS (mode)
1580 && trueop1 == CONST0_RTX (mode)
1581 && ! side_effects_p (op0))
1582 return op1;
1584 /* In IEEE floating point, x*1 is not equivalent to x for
1585 signalling NaNs. */
1586 if (!HONOR_SNANS (mode)
1587 && trueop1 == CONST1_RTX (mode))
1588 return op0;
1590 /* Convert multiply by constant power of two into shift unless
1591 we are still generating RTL. This test is a kludge. */
1592 if (GET_CODE (trueop1) == CONST_INT
1593 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1594 /* If the mode is larger than the host word size, and the
1595 uppermost bit is set, then this isn't a power of two due
1596 to implicit sign extension. */
1597 && (width <= HOST_BITS_PER_WIDE_INT
1598 || val != HOST_BITS_PER_WIDE_INT - 1))
1599 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1601 /* Likewise for multipliers wider than a word. */
1602 else if (GET_CODE (trueop1) == CONST_DOUBLE
1603 && (GET_MODE (trueop1) == VOIDmode
1604 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1605 && GET_MODE (op0) == mode
1606 && CONST_DOUBLE_LOW (trueop1) == 0
1607 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1608 return simplify_gen_binary (ASHIFT, mode, op0,
1609 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1611 /* x*2 is x+x and x*(-1) is -x */
1612 if (GET_CODE (trueop1) == CONST_DOUBLE
1613 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1614 && GET_MODE (op0) == mode)
1616 REAL_VALUE_TYPE d;
1617 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1619 if (REAL_VALUES_EQUAL (d, dconst2))
1620 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1622 if (REAL_VALUES_EQUAL (d, dconstm1))
1623 return simplify_gen_unary (NEG, mode, op0, mode);
1626 /* Reassociate multiplication, but for floating point MULTs
1627 only when the user specifies unsafe math optimizations. */
1628 if (! FLOAT_MODE_P (mode)
1629 || flag_unsafe_math_optimizations)
1631 tem = simplify_associative_operation (code, mode, op0, op1);
1632 if (tem)
1633 return tem;
1635 break;
1637 case IOR:
1638 if (trueop1 == const0_rtx)
1639 return op0;
1640 if (GET_CODE (trueop1) == CONST_INT
1641 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1642 == GET_MODE_MASK (mode)))
1643 return op1;
1644 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1645 return op0;
1646 /* A | (~A) -> -1 */
1647 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1648 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1649 && ! side_effects_p (op0)
1650 && SCALAR_INT_MODE_P (mode))
1651 return constm1_rtx;
1652 tem = simplify_associative_operation (code, mode, op0, op1);
1653 if (tem)
1654 return tem;
1655 break;
1657 case XOR:
1658 if (trueop1 == const0_rtx)
1659 return op0;
1660 if (GET_CODE (trueop1) == CONST_INT
1661 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1662 == GET_MODE_MASK (mode)))
1663 return simplify_gen_unary (NOT, mode, op0, mode);
1664 if (rtx_equal_p (trueop0, trueop1)
1665 && ! side_effects_p (op0)
1666 && GET_MODE_CLASS (mode) != MODE_CC)
1667 return CONST0_RTX (mode);
1669 /* Canonicalize XOR of the most significant bit to PLUS. */
1670 if ((GET_CODE (op1) == CONST_INT
1671 || GET_CODE (op1) == CONST_DOUBLE)
1672 && mode_signbit_p (mode, op1))
1673 return simplify_gen_binary (PLUS, mode, op0, op1);
1674 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1675 if ((GET_CODE (op1) == CONST_INT
1676 || GET_CODE (op1) == CONST_DOUBLE)
1677 && GET_CODE (op0) == PLUS
1678 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1679 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1680 && mode_signbit_p (mode, XEXP (op0, 1)))
1681 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1682 simplify_gen_binary (XOR, mode, op1,
1683 XEXP (op0, 1)));
1685 tem = simplify_associative_operation (code, mode, op0, op1);
1686 if (tem)
1687 return tem;
1688 break;
1690 case AND:
1691 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1692 return trueop1;
1693 /* If we are turning off bits already known off in OP0, we need
1694 not do an AND. */
1695 if (GET_CODE (trueop1) == CONST_INT
1696 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1697 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1698 return op0;
1699 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
1700 && GET_MODE_CLASS (mode) != MODE_CC)
1701 return op0;
1702 /* A & (~A) -> 0 */
1703 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1704 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1705 && ! side_effects_p (op0)
1706 && GET_MODE_CLASS (mode) != MODE_CC)
1707 return CONST0_RTX (mode);
1709 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1710 there are no nonzero bits of C outside of X's mode. */
1711 if ((GET_CODE (op0) == SIGN_EXTEND
1712 || GET_CODE (op0) == ZERO_EXTEND)
1713 && GET_CODE (trueop1) == CONST_INT
1714 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1715 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1716 & INTVAL (trueop1)) == 0)
1718 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1719 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1720 gen_int_mode (INTVAL (trueop1),
1721 imode));
1722 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1725 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1726 ((A & N) + B) & M -> (A + B) & M
1727 Similarly if (N & M) == 0,
1728 ((A | N) + B) & M -> (A + B) & M
1729 and for - instead of + and/or ^ instead of |. */
1730 if (GET_CODE (trueop1) == CONST_INT
1731 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1732 && ~INTVAL (trueop1)
1733 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1734 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1736 rtx pmop[2];
1737 int which;
1739 pmop[0] = XEXP (op0, 0);
1740 pmop[1] = XEXP (op0, 1);
1742 for (which = 0; which < 2; which++)
1744 tem = pmop[which];
1745 switch (GET_CODE (tem))
1747 case AND:
1748 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1749 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1750 == INTVAL (trueop1))
1751 pmop[which] = XEXP (tem, 0);
1752 break;
1753 case IOR:
1754 case XOR:
1755 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1756 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1757 pmop[which] = XEXP (tem, 0);
1758 break;
1759 default:
1760 break;
1764 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1766 tem = simplify_gen_binary (GET_CODE (op0), mode,
1767 pmop[0], pmop[1]);
1768 return simplify_gen_binary (code, mode, tem, op1);
1771 tem = simplify_associative_operation (code, mode, op0, op1);
1772 if (tem)
1773 return tem;
1774 break;
1776 case UDIV:
1777 /* 0/x is 0 (or x&0 if x has side-effects). */
1778 if (trueop0 == CONST0_RTX (mode))
1780 if (side_effects_p (op1))
1781 return simplify_gen_binary (AND, mode, op1, trueop0);
1782 return trueop0;
1784 /* x/1 is x. */
1785 if (trueop1 == CONST1_RTX (mode))
1786 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1787 /* Convert divide by power of two into shift. */
1788 if (GET_CODE (trueop1) == CONST_INT
1789 && (val = exact_log2 (INTVAL (trueop1))) > 0)
1790 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
1791 break;
1793 case DIV:
1794 /* Handle floating point and integers separately. */
1795 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1797 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1798 safe for modes with NaNs, since 0.0 / 0.0 will then be
1799 NaN rather than 0.0. Nor is it safe for modes with signed
1800 zeros, since dividing 0 by a negative number gives -0.0 */
1801 if (trueop0 == CONST0_RTX (mode)
1802 && !HONOR_NANS (mode)
1803 && !HONOR_SIGNED_ZEROS (mode)
1804 && ! side_effects_p (op1))
1805 return op0;
1806 /* x/1.0 is x. */
1807 if (trueop1 == CONST1_RTX (mode)
1808 && !HONOR_SNANS (mode))
1809 return op0;
1811 if (GET_CODE (trueop1) == CONST_DOUBLE
1812 && trueop1 != CONST0_RTX (mode))
1814 REAL_VALUE_TYPE d;
1815 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1817 /* x/-1.0 is -x. */
1818 if (REAL_VALUES_EQUAL (d, dconstm1)
1819 && !HONOR_SNANS (mode))
1820 return simplify_gen_unary (NEG, mode, op0, mode);
1822 /* Change FP division by a constant into multiplication.
1823 Only do this with -funsafe-math-optimizations. */
1824 if (flag_unsafe_math_optimizations
1825 && !REAL_VALUES_EQUAL (d, dconst0))
1827 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1828 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1829 return simplify_gen_binary (MULT, mode, op0, tem);
1833 else
1835 /* 0/x is 0 (or x&0 if x has side-effects). */
1836 if (trueop0 == CONST0_RTX (mode))
1838 if (side_effects_p (op1))
1839 return simplify_gen_binary (AND, mode, op1, trueop0);
1840 return trueop0;
1842 /* x/1 is x. */
1843 if (trueop1 == CONST1_RTX (mode))
1844 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
1845 /* x/-1 is -x. */
1846 if (trueop1 == constm1_rtx)
1848 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
1849 return simplify_gen_unary (NEG, mode, x, mode);
1852 break;
1854 case UMOD:
1855 /* 0%x is 0 (or x&0 if x has side-effects). */
1856 if (trueop0 == CONST0_RTX (mode))
1858 if (side_effects_p (op1))
1859 return simplify_gen_binary (AND, mode, op1, trueop0);
1860 return trueop0;
1862 /* x%1 is 0 (of x&0 if x has side-effects). */
1863 if (trueop1 == CONST1_RTX (mode))
1865 if (side_effects_p (op0))
1866 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1867 return CONST0_RTX (mode);
1869 /* Implement modulus by power of two as AND. */
1870 if (GET_CODE (trueop1) == CONST_INT
1871 && exact_log2 (INTVAL (trueop1)) > 0)
1872 return simplify_gen_binary (AND, mode, op0,
1873 GEN_INT (INTVAL (op1) - 1));
1874 break;
1876 case MOD:
1877 /* 0%x is 0 (or x&0 if x has side-effects). */
1878 if (trueop0 == CONST0_RTX (mode))
1880 if (side_effects_p (op1))
1881 return simplify_gen_binary (AND, mode, op1, trueop0);
1882 return trueop0;
1884 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
1885 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
1887 if (side_effects_p (op0))
1888 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
1889 return CONST0_RTX (mode);
1891 break;
1893 case ROTATERT:
1894 case ROTATE:
1895 case ASHIFTRT:
1896 /* Rotating ~0 always results in ~0. */
1897 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1898 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1899 && ! side_effects_p (op1))
1900 return op0;
1902 /* Fall through.... */
1904 case ASHIFT:
1905 case LSHIFTRT:
1906 if (trueop1 == CONST0_RTX (mode))
1907 return op0;
1908 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
1909 return op0;
1910 break;
1912 case SMIN:
1913 if (width <= HOST_BITS_PER_WIDE_INT
1914 && GET_CODE (trueop1) == CONST_INT
1915 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1916 && ! side_effects_p (op0))
1917 return op1;
1918 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1919 return op0;
1920 tem = simplify_associative_operation (code, mode, op0, op1);
1921 if (tem)
1922 return tem;
1923 break;
1925 case SMAX:
1926 if (width <= HOST_BITS_PER_WIDE_INT
1927 && GET_CODE (trueop1) == CONST_INT
1928 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1929 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1930 && ! side_effects_p (op0))
1931 return op1;
1932 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1933 return op0;
1934 tem = simplify_associative_operation (code, mode, op0, op1);
1935 if (tem)
1936 return tem;
1937 break;
1939 case UMIN:
1940 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1941 return op1;
1942 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1943 return op0;
1944 tem = simplify_associative_operation (code, mode, op0, op1);
1945 if (tem)
1946 return tem;
1947 break;
1949 case UMAX:
1950 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1951 return op1;
1952 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1953 return op0;
1954 tem = simplify_associative_operation (code, mode, op0, op1);
1955 if (tem)
1956 return tem;
1957 break;
1959 case SS_PLUS:
1960 case US_PLUS:
1961 case SS_MINUS:
1962 case US_MINUS:
1963 /* ??? There are simplifications that can be done. */
1964 return 0;
1966 case VEC_SELECT:
1967 if (!VECTOR_MODE_P (mode))
1969 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1970 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1971 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1972 gcc_assert (XVECLEN (trueop1, 0) == 1);
1973 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1975 if (GET_CODE (trueop0) == CONST_VECTOR)
1976 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1977 (trueop1, 0, 0)));
1979 else
1981 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1982 gcc_assert (GET_MODE_INNER (mode)
1983 == GET_MODE_INNER (GET_MODE (trueop0)));
1984 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1986 if (GET_CODE (trueop0) == CONST_VECTOR)
1988 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1989 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1990 rtvec v = rtvec_alloc (n_elts);
1991 unsigned int i;
1993 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1994 for (i = 0; i < n_elts; i++)
1996 rtx x = XVECEXP (trueop1, 0, i);
1998 gcc_assert (GET_CODE (x) == CONST_INT);
1999 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2000 INTVAL (x));
2003 return gen_rtx_CONST_VECTOR (mode, v);
2006 return 0;
2007 case VEC_CONCAT:
2009 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2010 ? GET_MODE (trueop0)
2011 : GET_MODE_INNER (mode));
2012 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2013 ? GET_MODE (trueop1)
2014 : GET_MODE_INNER (mode));
2016 gcc_assert (VECTOR_MODE_P (mode));
2017 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2018 == GET_MODE_SIZE (mode));
2020 if (VECTOR_MODE_P (op0_mode))
2021 gcc_assert (GET_MODE_INNER (mode)
2022 == GET_MODE_INNER (op0_mode));
2023 else
2024 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2026 if (VECTOR_MODE_P (op1_mode))
2027 gcc_assert (GET_MODE_INNER (mode)
2028 == GET_MODE_INNER (op1_mode));
2029 else
2030 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2032 if ((GET_CODE (trueop0) == CONST_VECTOR
2033 || GET_CODE (trueop0) == CONST_INT
2034 || GET_CODE (trueop0) == CONST_DOUBLE)
2035 && (GET_CODE (trueop1) == CONST_VECTOR
2036 || GET_CODE (trueop1) == CONST_INT
2037 || GET_CODE (trueop1) == CONST_DOUBLE))
2039 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2040 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2041 rtvec v = rtvec_alloc (n_elts);
2042 unsigned int i;
2043 unsigned in_n_elts = 1;
2045 if (VECTOR_MODE_P (op0_mode))
2046 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2047 for (i = 0; i < n_elts; i++)
2049 if (i < in_n_elts)
2051 if (!VECTOR_MODE_P (op0_mode))
2052 RTVEC_ELT (v, i) = trueop0;
2053 else
2054 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2056 else
2058 if (!VECTOR_MODE_P (op1_mode))
2059 RTVEC_ELT (v, i) = trueop1;
2060 else
2061 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2062 i - in_n_elts);
2066 return gen_rtx_CONST_VECTOR (mode, v);
2069 return 0;
2071 default:
2072 gcc_unreachable ();
2075 return 0;
2078 rtx
2079 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2080 rtx op0, rtx op1)
2082 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2083 HOST_WIDE_INT val;
2084 unsigned int width = GET_MODE_BITSIZE (mode);
2086 if (VECTOR_MODE_P (mode)
2087 && code != VEC_CONCAT
2088 && GET_CODE (op0) == CONST_VECTOR
2089 && GET_CODE (op1) == CONST_VECTOR)
2091 unsigned n_elts = GET_MODE_NUNITS (mode);
2092 enum machine_mode op0mode = GET_MODE (op0);
2093 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2094 enum machine_mode op1mode = GET_MODE (op1);
2095 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2096 rtvec v = rtvec_alloc (n_elts);
2097 unsigned int i;
2099 gcc_assert (op0_n_elts == n_elts);
2100 gcc_assert (op1_n_elts == n_elts);
2101 for (i = 0; i < n_elts; i++)
2103 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2104 CONST_VECTOR_ELT (op0, i),
2105 CONST_VECTOR_ELT (op1, i));
2106 if (!x)
2107 return 0;
2108 RTVEC_ELT (v, i) = x;
2111 return gen_rtx_CONST_VECTOR (mode, v);
2114 if (VECTOR_MODE_P (mode)
2115 && code == VEC_CONCAT
2116 && CONSTANT_P (op0) && CONSTANT_P (op1))
2118 unsigned n_elts = GET_MODE_NUNITS (mode);
2119 rtvec v = rtvec_alloc (n_elts);
2121 gcc_assert (n_elts >= 2);
2122 if (n_elts == 2)
2124 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2125 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2127 RTVEC_ELT (v, 0) = op0;
2128 RTVEC_ELT (v, 1) = op1;
2130 else
2132 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2133 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2134 unsigned i;
2136 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2137 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2138 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2140 for (i = 0; i < op0_n_elts; ++i)
2141 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2142 for (i = 0; i < op1_n_elts; ++i)
2143 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2146 return gen_rtx_CONST_VECTOR (mode, v);
2149 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2150 && GET_CODE (op0) == CONST_DOUBLE
2151 && GET_CODE (op1) == CONST_DOUBLE
2152 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2154 if (code == AND
2155 || code == IOR
2156 || code == XOR)
2158 long tmp0[4];
2159 long tmp1[4];
2160 REAL_VALUE_TYPE r;
2161 int i;
2163 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2164 GET_MODE (op0));
2165 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2166 GET_MODE (op1));
2167 for (i = 0; i < 4; i++)
2169 switch (code)
2171 case AND:
2172 tmp0[i] &= tmp1[i];
2173 break;
2174 case IOR:
2175 tmp0[i] |= tmp1[i];
2176 break;
2177 case XOR:
2178 tmp0[i] ^= tmp1[i];
2179 break;
2180 default:
2181 gcc_unreachable ();
2184 real_from_target (&r, tmp0, mode);
2185 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2187 else
2189 REAL_VALUE_TYPE f0, f1, value, result;
2190 bool inexact;
2192 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2193 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2194 real_convert (&f0, mode, &f0);
2195 real_convert (&f1, mode, &f1);
2197 if (HONOR_SNANS (mode)
2198 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2199 return 0;
2201 if (code == DIV
2202 && REAL_VALUES_EQUAL (f1, dconst0)
2203 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2204 return 0;
2206 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2207 && flag_trapping_math
2208 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2210 int s0 = REAL_VALUE_NEGATIVE (f0);
2211 int s1 = REAL_VALUE_NEGATIVE (f1);
2213 switch (code)
2215 case PLUS:
2216 /* Inf + -Inf = NaN plus exception. */
2217 if (s0 != s1)
2218 return 0;
2219 break;
2220 case MINUS:
2221 /* Inf - Inf = NaN plus exception. */
2222 if (s0 == s1)
2223 return 0;
2224 break;
2225 case DIV:
2226 /* Inf / Inf = NaN plus exception. */
2227 return 0;
2228 default:
2229 break;
2233 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2234 && flag_trapping_math
2235 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2236 || (REAL_VALUE_ISINF (f1)
2237 && REAL_VALUES_EQUAL (f0, dconst0))))
2238 /* Inf * 0 = NaN plus exception. */
2239 return 0;
2241 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2242 &f0, &f1);
2243 real_convert (&result, mode, &value);
2245 /* Don't constant fold this floating point operation if the
2246 result may depend upon the run-time rounding mode and
2247 flag_rounding_math is set, or if GCC's software emulation
2248 is unable to accurately represent the result. */
2250 if ((flag_rounding_math
2251 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2252 && !flag_unsafe_math_optimizations))
2253 && (inexact || !real_identical (&result, &value)))
2254 return NULL_RTX;
2256 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2260 /* We can fold some multi-word operations. */
2261 if (GET_MODE_CLASS (mode) == MODE_INT
2262 && width == HOST_BITS_PER_WIDE_INT * 2
2263 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2264 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2266 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2267 HOST_WIDE_INT h1, h2, hv, ht;
2269 if (GET_CODE (op0) == CONST_DOUBLE)
2270 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2271 else
2272 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2274 if (GET_CODE (op1) == CONST_DOUBLE)
2275 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2276 else
2277 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2279 switch (code)
2281 case MINUS:
2282 /* A - B == A + (-B). */
2283 neg_double (l2, h2, &lv, &hv);
2284 l2 = lv, h2 = hv;
2286 /* Fall through.... */
2288 case PLUS:
2289 add_double (l1, h1, l2, h2, &lv, &hv);
2290 break;
2292 case MULT:
2293 mul_double (l1, h1, l2, h2, &lv, &hv);
2294 break;
2296 case DIV:
2297 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2298 &lv, &hv, &lt, &ht))
2299 return 0;
2300 break;
2302 case MOD:
2303 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2304 &lt, &ht, &lv, &hv))
2305 return 0;
2306 break;
2308 case UDIV:
2309 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2310 &lv, &hv, &lt, &ht))
2311 return 0;
2312 break;
2314 case UMOD:
2315 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2316 &lt, &ht, &lv, &hv))
2317 return 0;
2318 break;
2320 case AND:
2321 lv = l1 & l2, hv = h1 & h2;
2322 break;
2324 case IOR:
2325 lv = l1 | l2, hv = h1 | h2;
2326 break;
2328 case XOR:
2329 lv = l1 ^ l2, hv = h1 ^ h2;
2330 break;
2332 case SMIN:
2333 if (h1 < h2
2334 || (h1 == h2
2335 && ((unsigned HOST_WIDE_INT) l1
2336 < (unsigned HOST_WIDE_INT) l2)))
2337 lv = l1, hv = h1;
2338 else
2339 lv = l2, hv = h2;
2340 break;
2342 case SMAX:
2343 if (h1 > h2
2344 || (h1 == h2
2345 && ((unsigned HOST_WIDE_INT) l1
2346 > (unsigned HOST_WIDE_INT) l2)))
2347 lv = l1, hv = h1;
2348 else
2349 lv = l2, hv = h2;
2350 break;
2352 case UMIN:
2353 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2354 || (h1 == h2
2355 && ((unsigned HOST_WIDE_INT) l1
2356 < (unsigned HOST_WIDE_INT) l2)))
2357 lv = l1, hv = h1;
2358 else
2359 lv = l2, hv = h2;
2360 break;
2362 case UMAX:
2363 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2364 || (h1 == h2
2365 && ((unsigned HOST_WIDE_INT) l1
2366 > (unsigned HOST_WIDE_INT) l2)))
2367 lv = l1, hv = h1;
2368 else
2369 lv = l2, hv = h2;
2370 break;
2372 case LSHIFTRT: case ASHIFTRT:
2373 case ASHIFT:
2374 case ROTATE: case ROTATERT:
2375 if (SHIFT_COUNT_TRUNCATED)
2376 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2378 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2379 return 0;
2381 if (code == LSHIFTRT || code == ASHIFTRT)
2382 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2383 code == ASHIFTRT);
2384 else if (code == ASHIFT)
2385 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2386 else if (code == ROTATE)
2387 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2388 else /* code == ROTATERT */
2389 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2390 break;
2392 default:
2393 return 0;
2396 return immed_double_const (lv, hv, mode);
2399 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2400 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2402 /* Get the integer argument values in two forms:
2403 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
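/* For illustration: given QImode 0xff, ARG0 is 0xff after
   zero-extension while ARG0S is -1 after sign-extension.  */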
2405 arg0 = INTVAL (op0);
2406 arg1 = INTVAL (op1);
2408 if (width < HOST_BITS_PER_WIDE_INT)
2410 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2411 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2413 arg0s = arg0;
2414 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2415 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2417 arg1s = arg1;
2418 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2419 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2421 else
2423 arg0s = arg0;
2424 arg1s = arg1;
2427 /* Compute the value of the arithmetic. */
2429 switch (code)
2431 case PLUS:
2432 val = arg0s + arg1s;
2433 break;
2435 case MINUS:
2436 val = arg0s - arg1s;
2437 break;
2439 case MULT:
2440 val = arg0s * arg1s;
2441 break;
2443 case DIV:
2444 if (arg1s == 0
2445 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2446 && arg1s == -1))
2447 return 0;
2448 val = arg0s / arg1s;
2449 break;
2451 case MOD:
2452 if (arg1s == 0
2453 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2454 && arg1s == -1))
2455 return 0;
2456 val = arg0s % arg1s;
2457 break;
2459 case UDIV:
2460 if (arg1 == 0
2461 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2462 && arg1s == -1))
2463 return 0;
2464 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2465 break;
2467 case UMOD:
2468 if (arg1 == 0
2469 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2470 && arg1s == -1))
2471 return 0;
2472 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2473 break;
2475 case AND:
2476 val = arg0 & arg1;
2477 break;
2479 case IOR:
2480 val = arg0 | arg1;
2481 break;
2483 case XOR:
2484 val = arg0 ^ arg1;
2485 break;
2487 case LSHIFTRT:
2488 case ASHIFT:
2489 case ASHIFTRT:
2490 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2491 the value is in range. We can't return any old value for
2492 out-of-range arguments because either the middle-end (via
2493 shift_truncation_mask) or the back-end might be relying on
2494 target-specific knowledge. Nor can we rely on
2495 shift_truncation_mask, since the shift might not be part of an
2496 ashlM3, lshrM3 or ashrM3 instruction. */
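/* For illustration: with SHIFT_COUNT_TRUNCATED, a QImode shift count of
   9 is reduced to 9 % 8 == 1; without it, an out-of-range count makes
   this function give up (return 0) rather than guess.  */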
2497 if (SHIFT_COUNT_TRUNCATED)
2498 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2499 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2500 return 0;
2502 val = (code == ASHIFT
2503 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2504 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2506 /* Sign-extend the result for arithmetic right shifts. */
2507 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2508 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2509 break;
2511 case ROTATERT:
2512 if (arg1 < 0)
2513 return 0;
2515 arg1 %= width;
2516 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2517 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2518 break;
2520 case ROTATE:
2521 if (arg1 < 0)
2522 return 0;
2524 arg1 %= width;
2525 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2526 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2527 break;
2529 case COMPARE:
2530 /* Do nothing here. */
2531 return 0;
2533 case SMIN:
2534 val = arg0s <= arg1s ? arg0s : arg1s;
2535 break;
2537 case UMIN:
2538 val = ((unsigned HOST_WIDE_INT) arg0
2539 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2540 break;
2542 case SMAX:
2543 val = arg0s > arg1s ? arg0s : arg1s;
2544 break;
2546 case UMAX:
2547 val = ((unsigned HOST_WIDE_INT) arg0
2548 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2549 break;
2551 case SS_PLUS:
2552 case US_PLUS:
2553 case SS_MINUS:
2554 case US_MINUS:
2555 /* ??? There are simplifications that can be done. */
2556 return 0;
2558 default:
2559 gcc_unreachable ();
2562 return gen_int_mode (val, mode);
2565 return NULL_RTX;
2570 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2571 PLUS or MINUS.
2573 Rather than test for specific cases, we do this by a brute-force method
2574 and do all possible simplifications until no more changes occur. Then
2575 we rebuild the operation.
2577 If FORCE is true, then always generate the rtx. This is used to
2578 canonicalize stuff emitted from simplify_gen_binary. Note that this
2579 can still fail if the rtx is too complex. It won't fail just because
2580 the result is not 'simpler' than the input, however. */
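/* For illustration: simplifying (A - B) + (B + C), the expansion loop
   flattens the operands into { +A, -B, +B, +C }; the pairwise pass then
   cancels -B against +B and the result is rebuilt as (plus A C).  */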
2582 struct simplify_plus_minus_op_data
2584 rtx op;
2585 short neg;
2586 short ix;
2589 static int
2590 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2592 const struct simplify_plus_minus_op_data *d1 = p1;
2593 const struct simplify_plus_minus_op_data *d2 = p2;
2594 int result;
2596 result = (commutative_operand_precedence (d2->op)
2597 - commutative_operand_precedence (d1->op));
2598 if (result)
2599 return result;
2600 return d1->ix - d2->ix;
2603 static rtx
2604 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2605 rtx op1, int force)
2607 struct simplify_plus_minus_op_data ops[8];
2608 rtx result, tem;
2609 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2610 int first, changed;
2611 int i, j;
2613 memset (ops, 0, sizeof ops);
2615 /* Set up the two operands and then expand them until nothing has been
2616 changed. If we run out of room in our array, give up; this should
2617 almost never happen. */
2619 ops[0].op = op0;
2620 ops[0].neg = 0;
2621 ops[1].op = op1;
2622 ops[1].neg = (code == MINUS);
2626 changed = 0;
2628 for (i = 0; i < n_ops; i++)
2630 rtx this_op = ops[i].op;
2631 int this_neg = ops[i].neg;
2632 enum rtx_code this_code = GET_CODE (this_op);
2634 switch (this_code)
2636 case PLUS:
2637 case MINUS:
2638 if (n_ops == 7)
2639 return NULL_RTX;
2641 ops[n_ops].op = XEXP (this_op, 1);
2642 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2643 n_ops++;
2645 ops[i].op = XEXP (this_op, 0);
2646 input_ops++;
2647 changed = 1;
2648 break;
2650 case NEG:
2651 ops[i].op = XEXP (this_op, 0);
2652 ops[i].neg = ! this_neg;
2653 changed = 1;
2654 break;
2656 case CONST:
2657 if (n_ops < 7
2658 && GET_CODE (XEXP (this_op, 0)) == PLUS
2659 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2660 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2662 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2663 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2664 ops[n_ops].neg = this_neg;
2665 n_ops++;
2666 input_consts++;
2667 changed = 1;
2669 break;
2671 case NOT:
2672 /* ~a -> (-a - 1) */
2673 if (n_ops != 7)
2675 ops[n_ops].op = constm1_rtx;
2676 ops[n_ops++].neg = this_neg;
2677 ops[i].op = XEXP (this_op, 0);
2678 ops[i].neg = !this_neg;
2679 changed = 1;
2681 break;
2683 case CONST_INT:
2684 if (this_neg)
2686 ops[i].op = neg_const_int (mode, this_op);
2687 ops[i].neg = 0;
2688 changed = 1;
2690 break;
2692 default:
2693 break;
2697 while (changed);
2699 /* If we only have two operands, we can't do anything. */
2700 if (n_ops <= 2 && !force)
2701 return NULL_RTX;
2703 /* Count the number of CONSTs we didn't split above. */
2704 for (i = 0; i < n_ops; i++)
2705 if (GET_CODE (ops[i].op) == CONST)
2706 input_consts++;
2708 /* Now simplify each pair of operands until nothing changes. The first
2709 time through just simplify constants against each other. */
2711 first = 1;
2714 changed = first;
2716 for (i = 0; i < n_ops - 1; i++)
2717 for (j = i + 1; j < n_ops; j++)
2719 rtx lhs = ops[i].op, rhs = ops[j].op;
2720 int lneg = ops[i].neg, rneg = ops[j].neg;
2722 if (lhs != 0 && rhs != 0
2723 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2725 enum rtx_code ncode = PLUS;
2727 if (lneg != rneg)
2729 ncode = MINUS;
2730 if (lneg)
2731 tem = lhs, lhs = rhs, rhs = tem;
2733 else if (swap_commutative_operands_p (lhs, rhs))
2734 tem = lhs, lhs = rhs, rhs = tem;
2736 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2738 /* Reject "simplifications" that just wrap the two
2739 arguments in a CONST. Failure to do so can result
2740 in infinite recursion with simplify_binary_operation
2741 when it calls us to simplify CONST operations. */
2742 if (tem
2743 && ! (GET_CODE (tem) == CONST
2744 && GET_CODE (XEXP (tem, 0)) == ncode
2745 && XEXP (XEXP (tem, 0), 0) == lhs
2746 && XEXP (XEXP (tem, 0), 1) == rhs)
2747 /* Don't allow -x + -1 -> ~x simplifications in the
2748 first pass. This allows us the chance to combine
2749 the -1 with other constants. */
2750 && ! (first
2751 && GET_CODE (tem) == NOT
2752 && XEXP (tem, 0) == rhs))
2754 lneg &= rneg;
2755 if (GET_CODE (tem) == NEG)
2756 tem = XEXP (tem, 0), lneg = !lneg;
2757 if (GET_CODE (tem) == CONST_INT && lneg)
2758 tem = neg_const_int (mode, tem), lneg = 0;
2760 ops[i].op = tem;
2761 ops[i].neg = lneg;
2762 ops[j].op = NULL_RTX;
2763 changed = 1;
2768 first = 0;
2770 while (changed);
2772 /* Pack all the operands to the lower-numbered entries. */
2773 for (i = 0, j = 0; j < n_ops; j++)
2774 if (ops[j].op)
2776 ops[i] = ops[j];
2777 /* Stabilize sort. */
2778 ops[i].ix = i;
2779 i++;
2781 n_ops = i;
2783 /* Sort the operations based on swap_commutative_operands_p. */
2784 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2786 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2787 if (n_ops == 2
2788 && GET_CODE (ops[1].op) == CONST_INT
2789 && CONSTANT_P (ops[0].op)
2790 && ops[0].neg)
2791 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2793 /* We suppressed creation of trivial CONST expressions in the
2794 combination loop to avoid recursion. Create one manually now.
2795 The combination loop should have ensured that there is exactly
2796 one CONST_INT, and the sort will have ensured that it is last
2797 in the array and that any other constant will be next-to-last. */
2799 if (n_ops > 1
2800 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2801 && CONSTANT_P (ops[n_ops - 2].op))
2803 rtx value = ops[n_ops - 1].op;
2804 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2805 value = neg_const_int (mode, value);
2806 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2807 n_ops--;
2810 /* Count the number of CONSTs that we generated. */
2811 n_consts = 0;
2812 for (i = 0; i < n_ops; i++)
2813 if (GET_CODE (ops[i].op) == CONST)
2814 n_consts++;
2816 /* Give up if we didn't reduce the number of operands we had. Make
2817 sure we count a CONST as two operands. If we have the same
2818 number of operands, but have made more CONSTs than before, this
2819 is also an improvement, so accept it. */
2820 if (!force
2821 && (n_ops + n_consts > input_ops
2822 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2823 return NULL_RTX;
2825 /* Put a non-negated operand first, if possible. */
2827 for (i = 0; i < n_ops && ops[i].neg; i++)
2828 continue;
2829 if (i == n_ops)
2830 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2831 else if (i != 0)
2833 tem = ops[0].op;
2834 ops[0] = ops[i];
2835 ops[i].op = tem;
2836 ops[i].neg = 1;
2839 /* Now make the result by performing the requested operations. */
2840 result = ops[0].op;
2841 for (i = 1; i < n_ops; i++)
2842 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2843 mode, result, ops[i].op);
2845 return result;
2848 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2849 static bool
2850 plus_minus_operand_p (rtx x)
2852 return GET_CODE (x) == PLUS
2853 || GET_CODE (x) == MINUS
2854 || (GET_CODE (x) == CONST
2855 && GET_CODE (XEXP (x, 0)) == PLUS
2856 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2857 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2860 /* Like simplify_binary_operation except used for relational operators.
2861 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2862 not also be VOIDmode.
2864 CMP_MODE specifies the mode in which the comparison is done, so it is
2865 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2866 the operands or, if both are VOIDmode, the operands are compared in
2867 "infinite precision". */
2868 rtx
2869 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2870 enum machine_mode cmp_mode, rtx op0, rtx op1)
2872 rtx tem, trueop0, trueop1;
2874 if (cmp_mode == VOIDmode)
2875 cmp_mode = GET_MODE (op0);
2876 if (cmp_mode == VOIDmode)
2877 cmp_mode = GET_MODE (op1);
2879 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2880 if (tem)
2882 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2884 if (tem == const0_rtx)
2885 return CONST0_RTX (mode);
2886 #ifdef FLOAT_STORE_FLAG_VALUE
2888 REAL_VALUE_TYPE val;
2889 val = FLOAT_STORE_FLAG_VALUE (mode);
2890 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2892 #else
2893 return NULL_RTX;
2894 #endif
2896 if (VECTOR_MODE_P (mode))
2898 if (tem == const0_rtx)
2899 return CONST0_RTX (mode);
2900 #ifdef VECTOR_STORE_FLAG_VALUE
2902 int i, units;
2903 rtvec v;
2905 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2906 if (val == NULL_RTX)
2907 return NULL_RTX;
2908 if (val == const1_rtx)
2909 return CONST1_RTX (mode);
2911 units = GET_MODE_NUNITS (mode);
2912 v = rtvec_alloc (units);
2913 for (i = 0; i < units; i++)
2914 RTVEC_ELT (v, i) = val;
2915 return gen_rtx_raw_CONST_VECTOR (mode, v);
2917 #else
2918 return NULL_RTX;
2919 #endif
2922 return tem;
2925 /* For the following tests, ensure const0_rtx is op1. */
2926 if (swap_commutative_operands_p (op0, op1)
2927 || (op0 == const0_rtx && op1 != const0_rtx))
2928 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2930 /* If op0 is a compare, extract the comparison arguments from it. */
2931 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2932 return simplify_relational_operation (code, mode, VOIDmode,
2933 XEXP (op0, 0), XEXP (op0, 1));
2935 if (mode == VOIDmode
2936 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2937 || CC0_P (op0))
2938 return NULL_RTX;
2940 trueop0 = avoid_constant_pool_reference (op0);
2941 trueop1 = avoid_constant_pool_reference (op1);
2942 return simplify_relational_operation_1 (code, mode, cmp_mode,
2943 trueop0, trueop1);
2946 /* This part of simplify_relational_operation is only used when CMP_MODE
2947 is not in class MODE_CC (i.e. it is a real comparison).
2949 MODE is the mode of the result, while CMP_MODE specifies in which
2950 mode the comparison is done, so it is the mode of the operands. */
2952 static rtx
2953 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2954 enum machine_mode cmp_mode, rtx op0, rtx op1)
2956 enum rtx_code op0code = GET_CODE (op0);
2958 if (GET_CODE (op1) == CONST_INT)
2960 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2962 /* If op0 is a comparison, extract the comparison arguments from it. */
2963 if (code == NE)
2965 if (GET_MODE (op0) == mode)
2966 return simplify_rtx (op0);
2967 else
2968 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2969 XEXP (op0, 0), XEXP (op0, 1));
2971 else if (code == EQ)
2973 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2974 if (new_code != UNKNOWN)
2975 return simplify_gen_relational (new_code, mode, VOIDmode,
2976 XEXP (op0, 0), XEXP (op0, 1));
2981 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2982 if ((code == EQ || code == NE)
2983 && (op0code == PLUS || op0code == MINUS)
2984 && CONSTANT_P (op1)
2985 && CONSTANT_P (XEXP (op0, 1))
2986 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2988 rtx x = XEXP (op0, 0);
2989 rtx c = XEXP (op0, 1);
2991 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2992 cmp_mode, op1, c);
2993 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2996 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2997 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2998 if (code == NE
2999 && op1 == const0_rtx
3000 && GET_MODE_CLASS (mode) == MODE_INT
3001 && cmp_mode != VOIDmode
3002 /* ??? Work-around BImode bugs in the ia64 backend. */
3003 && mode != BImode
3004 && cmp_mode != BImode
3005 && nonzero_bits (op0, cmp_mode) == 1
3006 && STORE_FLAG_VALUE == 1)
3007 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3008 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3009 : lowpart_subreg (mode, op0, cmp_mode);
3011 return NULL_RTX;
3014 /* Check if the given comparison (done in the given MODE) is actually a
3015 tautology or a contradiction.
3016 If no simplification is possible, this function returns zero.
3017 Otherwise, it returns either const_true_rtx or const0_rtx. */
3019 rtx
3020 simplify_const_relational_operation (enum rtx_code code,
3021 enum machine_mode mode,
3022 rtx op0, rtx op1)
3024 int equal, op0lt, op0ltu, op1lt, op1ltu;
3025 rtx tem;
3026 rtx trueop0;
3027 rtx trueop1;
3029 gcc_assert (mode != VOIDmode
3030 || (GET_MODE (op0) == VOIDmode
3031 && GET_MODE (op1) == VOIDmode));
3033 /* If op0 is a compare, extract the comparison arguments from it. */
3034 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3036 op1 = XEXP (op0, 1);
3037 op0 = XEXP (op0, 0);
3039 if (GET_MODE (op0) != VOIDmode)
3040 mode = GET_MODE (op0);
3041 else if (GET_MODE (op1) != VOIDmode)
3042 mode = GET_MODE (op1);
3043 else
3044 return 0;
3047 /* We can't simplify MODE_CC values since we don't know what the
3048 actual comparison is. */
3049 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3050 return 0;
3052 /* Make sure the constant is second. */
3053 if (swap_commutative_operands_p (op0, op1))
3055 tem = op0, op0 = op1, op1 = tem;
3056 code = swap_condition (code);
3059 trueop0 = avoid_constant_pool_reference (op0);
3060 trueop1 = avoid_constant_pool_reference (op1);
3062 /* For integer comparisons of A and B maybe we can simplify A - B and can
3063 then simplify a comparison of that with zero. If A and B are both either
3064 a register or a CONST_INT, this can't help; testing for these cases will
3065 prevent infinite recursion here and speed things up.
3067 If CODE is an unsigned comparison, then we can never do this optimization,
3068 because it gives an incorrect result if the subtraction wraps around zero.
3069 ANSI C defines unsigned operations such that they never overflow, and
3070 thus such cases can not be ignored; but we cannot do it even for
3071 signed comparisons for languages such as Java, so test flag_wrapv. */
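/* For illustration (assuming !flag_wrapv): (gt:SI (plus:SI x (const_int 1)) x)
   first folds the difference (plus x 1) - x to (const_int 1), and the
   recursive call then decides 1 > 0, yielding const_true_rtx.  */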
3073 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3074 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3075 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3076 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3077 /* We cannot do this for == or != if tem is a nonzero address. */
3078 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3079 && code != GTU && code != GEU && code != LTU && code != LEU)
3080 return simplify_const_relational_operation (signed_condition (code),
3081 mode, tem, const0_rtx);
3083 if (flag_unsafe_math_optimizations && code == ORDERED)
3084 return const_true_rtx;
3086 if (flag_unsafe_math_optimizations && code == UNORDERED)
3087 return const0_rtx;
3089 /* For modes without NaNs, if the two operands are equal, we know the
3090 result unless they have side-effects. */
3091 if (! HONOR_NANS (GET_MODE (trueop0))
3092 && rtx_equal_p (trueop0, trueop1)
3093 && ! side_effects_p (trueop0))
3094 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3096 /* If the operands are floating-point constants, see if we can fold
3097 the result. */
3098 else if (GET_CODE (trueop0) == CONST_DOUBLE
3099 && GET_CODE (trueop1) == CONST_DOUBLE
3100 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
3102 REAL_VALUE_TYPE d0, d1;
3104 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3105 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3107 /* Comparisons are unordered iff at least one of the values is NaN. */
3108 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3109 switch (code)
3111 case UNEQ:
3112 case UNLT:
3113 case UNGT:
3114 case UNLE:
3115 case UNGE:
3116 case NE:
3117 case UNORDERED:
3118 return const_true_rtx;
3119 case EQ:
3120 case LT:
3121 case GT:
3122 case LE:
3123 case GE:
3124 case LTGT:
3125 case ORDERED:
3126 return const0_rtx;
3127 default:
3128 return 0;
3131 equal = REAL_VALUES_EQUAL (d0, d1);
3132 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3133 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3136 /* Otherwise, see if the operands are both integers. */
3137 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3138 && (GET_CODE (trueop0) == CONST_DOUBLE
3139 || GET_CODE (trueop0) == CONST_INT)
3140 && (GET_CODE (trueop1) == CONST_DOUBLE
3141 || GET_CODE (trueop1) == CONST_INT))
3143 int width = GET_MODE_BITSIZE (mode);
3144 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3145 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3147 /* Get the two words comprising each integer constant. */
3148 if (GET_CODE (trueop0) == CONST_DOUBLE)
3150 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3151 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3153 else
3155 l0u = l0s = INTVAL (trueop0);
3156 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3159 if (GET_CODE (trueop1) == CONST_DOUBLE)
3161 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3162 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3164 else
3166 l1u = l1s = INTVAL (trueop1);
3167 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3170 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3171 we have to sign or zero-extend the values. */
3172 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3174 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3175 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3177 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3178 l0s |= ((HOST_WIDE_INT) (-1) << width);
3180 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3181 l1s |= ((HOST_WIDE_INT) (-1) << width);
3183 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3184 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3186 equal = (h0u == h1u && l0u == l1u);
3187 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3188 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3189 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3190 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3193 /* Otherwise, there are some code-specific tests we can make. */
3194 else
3196 /* Optimize comparisons with upper and lower bounds. */
3197 if (SCALAR_INT_MODE_P (mode)
3198 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3200 rtx mmin, mmax;
3201 int sign;
3203 if (code == GEU
3204 || code == LEU
3205 || code == GTU
3206 || code == LTU)
3207 sign = 0;
3208 else
3209 sign = 1;
3211 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3213 tem = NULL_RTX;
3214 switch (code)
3216 case GEU:
3217 case GE:
3218 /* x >= min is always true. */
3219 if (rtx_equal_p (trueop1, mmin))
3220 tem = const_true_rtx;
3221 else
3222 break;
3224 case LEU:
3225 case LE:
3226 /* x <= max is always true. */
3227 if (rtx_equal_p (trueop1, mmax))
3228 tem = const_true_rtx;
3229 break;
3231 case GTU:
3232 case GT:
3233 /* x > max is always false. */
3234 if (rtx_equal_p (trueop1, mmax))
3235 tem = const0_rtx;
3236 break;
3238 case LTU:
3239 case LT:
3240 /* x < min is always false. */
3241 if (rtx_equal_p (trueop1, mmin))
3242 tem = const0_rtx;
3243 break;
3245 default:
3246 break;
3248 if (tem == const0_rtx
3249 || tem == const_true_rtx)
3250 return tem;
3253 switch (code)
3255 case EQ:
3256 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3257 return const0_rtx;
3258 break;
3260 case NE:
3261 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3262 return const_true_rtx;
3263 break;
3265 case LT:
3266 /* Optimize abs(x) < 0.0. */
3267 if (trueop1 == CONST0_RTX (mode)
3268 && !HONOR_SNANS (mode)
3269 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3271 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3272 : trueop0;
3273 if (GET_CODE (tem) == ABS)
3274 return const0_rtx;
3276 break;
3278 case GE:
3279 /* Optimize abs(x) >= 0.0. */
3280 if (trueop1 == CONST0_RTX (mode)
3281 && !HONOR_NANS (mode)
3282 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3284 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3285 : trueop0;
3286 if (GET_CODE (tem) == ABS)
3287 return const_true_rtx;
3289 break;
3291 case UNGE:
3292 /* Optimize ! (abs(x) < 0.0). */
3293 if (trueop1 == CONST0_RTX (mode))
3295 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3296 : trueop0;
3297 if (GET_CODE (tem) == ABS)
3298 return const_true_rtx;
3300 break;
3302 default:
3303 break;
3306 return 0;
3309 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3310 as appropriate. */
3311 switch (code)
3313 case EQ:
3314 case UNEQ:
3315 return equal ? const_true_rtx : const0_rtx;
3316 case NE:
3317 case LTGT:
3318 return ! equal ? const_true_rtx : const0_rtx;
3319 case LT:
3320 case UNLT:
3321 return op0lt ? const_true_rtx : const0_rtx;
3322 case GT:
3323 case UNGT:
3324 return op1lt ? const_true_rtx : const0_rtx;
3325 case LTU:
3326 return op0ltu ? const_true_rtx : const0_rtx;
3327 case GTU:
3328 return op1ltu ? const_true_rtx : const0_rtx;
3329 case LE:
3330 case UNLE:
3331 return equal || op0lt ? const_true_rtx : const0_rtx;
3332 case GE:
3333 case UNGE:
3334 return equal || op1lt ? const_true_rtx : const0_rtx;
3335 case LEU:
3336 return equal || op0ltu ? const_true_rtx : const0_rtx;
3337 case GEU:
3338 return equal || op1ltu ? const_true_rtx : const0_rtx;
3339 case ORDERED:
3340 return const_true_rtx;
3341 case UNORDERED:
3342 return const0_rtx;
3343 default:
3344 gcc_unreachable ();
3348 /* Simplify CODE, an operation with result mode MODE and three operands,
3349 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3350 a constant. Return 0 if no simplification is possible. */
3352 rtx
3353 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3354 enum machine_mode op0_mode, rtx op0, rtx op1,
3355 rtx op2)
3357 unsigned int width = GET_MODE_BITSIZE (mode);
3359 /* VOIDmode means "infinite" precision. */
3360 if (width == 0)
3361 width = HOST_BITS_PER_WIDE_INT;
3363 switch (code)
3365 case SIGN_EXTRACT:
3366 case ZERO_EXTRACT:
3367 if (GET_CODE (op0) == CONST_INT
3368 && GET_CODE (op1) == CONST_INT
3369 && GET_CODE (op2) == CONST_INT
3370 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3371 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3373 /* Extracting a bit-field from a constant */
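/* For illustration (assuming !BITS_BIG_ENDIAN):
   (zero_extract (const_int 0x5a) (const_int 4) (const_int 1)) shifts
   0x5a right by 1 and masks with 0xf, giving (const_int 0xd).  */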
3374 HOST_WIDE_INT val = INTVAL (op0);
3376 if (BITS_BIG_ENDIAN)
3377 val >>= (GET_MODE_BITSIZE (op0_mode)
3378 - INTVAL (op2) - INTVAL (op1));
3379 else
3380 val >>= INTVAL (op2);
3382 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3384 /* First zero-extend. */
3385 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3386 /* If desired, propagate sign bit. */
3387 if (code == SIGN_EXTRACT
3388 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3389 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3392 /* Clear the bits that don't belong in our mode,
3393 unless they and our sign bit are all one.
3394 So we get either a reasonable negative value or a reasonable
3395 unsigned value for this mode. */
3396 if (width < HOST_BITS_PER_WIDE_INT
3397 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3398 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3399 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3401 return gen_int_mode (val, mode);
3403 break;
3405 case IF_THEN_ELSE:
3406 if (GET_CODE (op0) == CONST_INT)
3407 return op0 != const0_rtx ? op1 : op2;
3409 /* Convert c ? a : a into "a". */
3410 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3411 return op1;
3413 /* Convert a != b ? a : b into "a". */
3414 if (GET_CODE (op0) == NE
3415 && ! side_effects_p (op0)
3416 && ! HONOR_NANS (mode)
3417 && ! HONOR_SIGNED_ZEROS (mode)
3418 && ((rtx_equal_p (XEXP (op0, 0), op1)
3419 && rtx_equal_p (XEXP (op0, 1), op2))
3420 || (rtx_equal_p (XEXP (op0, 0), op2)
3421 && rtx_equal_p (XEXP (op0, 1), op1))))
3422 return op1;
3424 /* Convert a == b ? a : b into "b". */
3425 if (GET_CODE (op0) == EQ
3426 && ! side_effects_p (op0)
3427 && ! HONOR_NANS (mode)
3428 && ! HONOR_SIGNED_ZEROS (mode)
3429 && ((rtx_equal_p (XEXP (op0, 0), op1)
3430 && rtx_equal_p (XEXP (op0, 1), op2))
3431 || (rtx_equal_p (XEXP (op0, 0), op2)
3432 && rtx_equal_p (XEXP (op0, 1), op1))))
3433 return op2;
3435 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3437 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3438 ? GET_MODE (XEXP (op0, 1))
3439 : GET_MODE (XEXP (op0, 0)));
3440 rtx temp;
3442 /* Look for happy constants in op1 and op2. */
3443 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3445 HOST_WIDE_INT t = INTVAL (op1);
3446 HOST_WIDE_INT f = INTVAL (op2);
3448 if (t == STORE_FLAG_VALUE && f == 0)
3449 code = GET_CODE (op0);
3450 else if (t == 0 && f == STORE_FLAG_VALUE)
3452 enum rtx_code tmp;
3453 tmp = reversed_comparison_code (op0, NULL_RTX);
3454 if (tmp == UNKNOWN)
3455 break;
3456 code = tmp;
3458 else
3459 break;
3461 return simplify_gen_relational (code, mode, cmp_mode,
3462 XEXP (op0, 0), XEXP (op0, 1));
3465 if (cmp_mode == VOIDmode)
3466 cmp_mode = op0_mode;
3467 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3468 cmp_mode, XEXP (op0, 0),
3469 XEXP (op0, 1));
3471 /* See if any simplifications were possible. */
3472 if (temp)
3474 if (GET_CODE (temp) == CONST_INT)
3475 return temp == const0_rtx ? op2 : op1;
3476 else if (temp)
3477 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3480 break;
3482 case VEC_MERGE:
3483 gcc_assert (GET_MODE (op0) == mode);
3484 gcc_assert (GET_MODE (op1) == mode);
3485 gcc_assert (VECTOR_MODE_P (mode));
3486 op2 = avoid_constant_pool_reference (op2);
3487 if (GET_CODE (op2) == CONST_INT)
3489 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3490 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3491 int mask = (1 << n_elts) - 1;
3493 if (!(INTVAL (op2) & mask))
3494 return op1;
3495 if ((INTVAL (op2) & mask) == mask)
3496 return op0;
3498 op0 = avoid_constant_pool_reference (op0);
3499 op1 = avoid_constant_pool_reference (op1);
3500 if (GET_CODE (op0) == CONST_VECTOR
3501 && GET_CODE (op1) == CONST_VECTOR)
3503 rtvec v = rtvec_alloc (n_elts);
3504 unsigned int i;
3506 for (i = 0; i < n_elts; i++)
3507 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3508 ? CONST_VECTOR_ELT (op0, i)
3509 : CONST_VECTOR_ELT (op1, i));
3510 return gen_rtx_CONST_VECTOR (mode, v);
3513 break;
3515 default:
3516 gcc_unreachable ();
3519 return 0;
3522 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3523 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3525 Works by unpacking OP into a collection of 8-bit values
3526 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3527 and then repacking them again for OUTERMODE. */
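/* For illustration (little-endian layout): an SImode subreg at BYTE 4 of
   a DImode CONST_DOUBLE unpacks the 64-bit value into eight byte-sized
   chunks, selects chunks 4..7, and repacks them as a CONST_INT; the
   word/byte shuffling below handles big-endian targets.  */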
3529 static rtx
3530 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3531 enum machine_mode innermode, unsigned int byte)
3533 /* We support up to 512-bit values (for V8DFmode). */
3534 enum {
3535 max_bitsize = 512,
3536 value_bit = 8,
3537 value_mask = (1 << value_bit) - 1
3539 unsigned char value[max_bitsize / value_bit];
3540 int value_start;
3541 int i;
3542 int elem;
3544 int num_elem;
3545 rtx * elems;
3546 int elem_bitsize;
3547 rtx result_s;
3548 rtvec result_v = NULL;
3549 enum mode_class outer_class;
3550 enum machine_mode outer_submode;
3552 /* Some ports misuse CCmode. */
3553 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3554 return op;
3556 /* We have no way to represent a complex constant at the rtl level. */
3557 if (COMPLEX_MODE_P (outermode))
3558 return NULL_RTX;
3560 /* Unpack the value. */
3562 if (GET_CODE (op) == CONST_VECTOR)
3564 num_elem = CONST_VECTOR_NUNITS (op);
3565 elems = &CONST_VECTOR_ELT (op, 0);
3566 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3568 else
3570 num_elem = 1;
3571 elems = &op;
3572 elem_bitsize = max_bitsize;
3574 /* If this asserts, it is too complicated; reducing value_bit may help. */
3575 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3576 /* I don't know how to handle endianness of sub-units. */
3577 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3579 for (elem = 0; elem < num_elem; elem++)
3581 unsigned char * vp;
3582 rtx el = elems[elem];
3584 /* Vectors are kept in target memory order. (This is probably
3585 a mistake.) */
3587 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3588 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3589 / BITS_PER_UNIT);
3590 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3591 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3592 unsigned bytele = (subword_byte % UNITS_PER_WORD
3593 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3594 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3597 switch (GET_CODE (el))
3599 case CONST_INT:
3600 for (i = 0;
3601 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3602 i += value_bit)
3603 *vp++ = INTVAL (el) >> i;
3604 /* CONST_INTs are always logically sign-extended. */
3605 for (; i < elem_bitsize; i += value_bit)
3606 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3607 break;
3609 case CONST_DOUBLE:
3610 if (GET_MODE (el) == VOIDmode)
3612 /* If this triggers, someone should have generated a
3613 CONST_INT instead. */
3614 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3616 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3617 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3618 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3620 *vp++
3621 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3622 i += value_bit;
3624 /* It shouldn't matter what's done here, so fill it with
3625 zero. */
3626 for (; i < elem_bitsize; i += value_bit)
3627 *vp++ = 0;
3629 else
3631 long tmp[max_bitsize / 32];
3632 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3634 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3635 gcc_assert (bitsize <= elem_bitsize);
3636 gcc_assert (bitsize % value_bit == 0);
3638 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3639 GET_MODE (el));
3641 /* real_to_target produces its result in words affected by
3642 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3643 and use WORDS_BIG_ENDIAN instead; see the documentation
3644 of SUBREG in rtl.texi. */
3645 for (i = 0; i < bitsize; i += value_bit)
3647 int ibase;
3648 if (WORDS_BIG_ENDIAN)
3649 ibase = bitsize - 1 - i;
3650 else
3651 ibase = i;
3652 *vp++ = tmp[ibase / 32] >> i % 32;
3655 /* It shouldn't matter what's done here, so fill it with
3656 zero. */
3657 for (; i < elem_bitsize; i += value_bit)
3658 *vp++ = 0;
3660 break;
3662 default:
3663 gcc_unreachable ();
3667 /* Now, pick the right byte to start with. */
3668 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3669 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3670 will already have offset 0. */
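/* For illustration: on a big-endian target, (subreg:SI (x:DI) 4) selects
   the least significant word, so BYTE is renumbered from 4 to 0 before
   indexing the little-endian VALUE array.  */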
3671 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3673 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3674 - byte);
3675 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3676 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3677 byte = (subword_byte % UNITS_PER_WORD
3678 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3681 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3682 so if it's become negative it will instead be very large.) */
3683 gcc_assert (byte < GET_MODE_SIZE (innermode));
3685 /* Convert from bytes to chunks of size value_bit. */
3686 value_start = byte * (BITS_PER_UNIT / value_bit);
3688 /* Re-pack the value. */
3690 if (VECTOR_MODE_P (outermode))
3692 num_elem = GET_MODE_NUNITS (outermode);
3693 result_v = rtvec_alloc (num_elem);
3694 elems = &RTVEC_ELT (result_v, 0);
3695 outer_submode = GET_MODE_INNER (outermode);
3697 else
3699 num_elem = 1;
3700 elems = &result_s;
3701 outer_submode = outermode;
3704 outer_class = GET_MODE_CLASS (outer_submode);
3705 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3707 gcc_assert (elem_bitsize % value_bit == 0);
3708 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3710 for (elem = 0; elem < num_elem; elem++)
3712 unsigned char *vp;
3714 /* Vectors are stored in target memory order. (This is probably
3715 a mistake.) */
3717 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3718 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3719 / BITS_PER_UNIT);
3720 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3721 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3722 unsigned bytele = (subword_byte % UNITS_PER_WORD
3723 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3724 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3727 switch (outer_class)
3729 case MODE_INT:
3730 case MODE_PARTIAL_INT:
3732 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3734 for (i = 0;
3735 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3736 i += value_bit)
3737 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3738 for (; i < elem_bitsize; i += value_bit)
3739 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3740 << (i - HOST_BITS_PER_WIDE_INT));
3742 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3743 know why. */
3744 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3745 elems[elem] = gen_int_mode (lo, outer_submode);
3746 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
3747 elems[elem] = immed_double_const (lo, hi, outer_submode);
3748 else
3749 return NULL_RTX;
3751 break;
3753 case MODE_FLOAT:
3755 REAL_VALUE_TYPE r;
3756 long tmp[max_bitsize / 32];
3758 /* real_from_target wants its input in words affected by
3759 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3760 and use WORDS_BIG_ENDIAN instead; see the documentation
3761 of SUBREG in rtl.texi. */
3762 for (i = 0; i < max_bitsize / 32; i++)
3763 tmp[i] = 0;
3764 for (i = 0; i < elem_bitsize; i += value_bit)
3766 int ibase;
3767 if (WORDS_BIG_ENDIAN)
3768 ibase = elem_bitsize - 1 - i;
3769 else
3770 ibase = i;
3771 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3774 real_from_target (&r, tmp, outer_submode);
3775 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3777 break;
3779 default:
3780 gcc_unreachable ();
3783 if (VECTOR_MODE_P (outermode))
3784 return gen_rtx_CONST_VECTOR (outermode, result_v);
3785 else
3786 return result_s;
3789 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3790 Return 0 if no simplifications are possible. */
3791 rtx
3792 simplify_subreg (enum machine_mode outermode, rtx op,
3793 enum machine_mode innermode, unsigned int byte)
3795 /* Little bit of sanity checking. */
3796 gcc_assert (innermode != VOIDmode);
3797 gcc_assert (outermode != VOIDmode);
3798 gcc_assert (innermode != BLKmode);
3799 gcc_assert (outermode != BLKmode);
3801 gcc_assert (GET_MODE (op) == innermode
3802 || GET_MODE (op) == VOIDmode);
3804 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3805 gcc_assert (byte < GET_MODE_SIZE (innermode));
3807 if (outermode == innermode && !byte)
3808 return op;
3810 if (GET_CODE (op) == CONST_INT
3811 || GET_CODE (op) == CONST_DOUBLE
3812 || GET_CODE (op) == CONST_VECTOR)
3813 return simplify_immed_subreg (outermode, op, innermode, byte);
3815 /* Changing mode twice with SUBREG => just change it once,
3816 or not at all if changing back to op's starting mode. */
3817 if (GET_CODE (op) == SUBREG)
3819 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3820 int final_offset = byte + SUBREG_BYTE (op);
3821 rtx newx;
3823 if (outermode == innermostmode
3824 && byte == 0 && SUBREG_BYTE (op) == 0)
3825 return SUBREG_REG (op);
3827 /* The SUBREG_BYTE represents offset, as if the value were stored
3828 in memory. An irritating exception is a paradoxical subreg, where
3829 we define SUBREG_BYTE to be 0. On big endian machines, this
3830 value should be negative. For a moment, undo this exception. */
3831 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3833 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3834 if (WORDS_BIG_ENDIAN)
3835 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3836 if (BYTES_BIG_ENDIAN)
3837 final_offset += difference % UNITS_PER_WORD;
3839 if (SUBREG_BYTE (op) == 0
3840 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3842 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3843 if (WORDS_BIG_ENDIAN)
3844 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3845 if (BYTES_BIG_ENDIAN)
3846 final_offset += difference % UNITS_PER_WORD;
3849 /* See whether resulting subreg will be paradoxical. */
3850 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3852 /* In nonparadoxical subregs we can't handle negative offsets. */
3853 if (final_offset < 0)
3854 return NULL_RTX;
3855 /* Bail out in case resulting subreg would be incorrect. */
3856 if (final_offset % GET_MODE_SIZE (outermode)
3857 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3858 return NULL_RTX;
3860 else
3862 int offset = 0;
3863 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3865 /* In a paradoxical subreg, see if we are still looking at the lower part.
3866 If so, our SUBREG_BYTE will be 0. */
3867 if (WORDS_BIG_ENDIAN)
3868 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3869 if (BYTES_BIG_ENDIAN)
3870 offset += difference % UNITS_PER_WORD;
3871 if (offset == final_offset)
3872 final_offset = 0;
3873 else
3874 return NULL_RTX;
3877 /* Recurse for further possible simplifications. */
3878 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3879 final_offset);
3880 if (newx)
3881 return newx;
3882 if (validate_subreg (outermode, innermostmode,
3883 SUBREG_REG (op), final_offset))
3884 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3885 return NULL_RTX;
3888 /* SUBREG of a hard register => just change the register number
3889 and/or mode. If the hard register is not valid in that mode,
3890 suppress this simplification. If the hard register is the stack,
3891 frame, or argument pointer, leave this as a SUBREG. */
3893 if (REG_P (op)
3894 && REGNO (op) < FIRST_PSEUDO_REGISTER
3895 #ifdef CANNOT_CHANGE_MODE_CLASS
3896 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3897 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3898 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3899 #endif
3900 && ((reload_completed && !frame_pointer_needed)
3901 || (REGNO (op) != FRAME_POINTER_REGNUM
3902 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3903 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3904 #endif
3906 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3907 && REGNO (op) != ARG_POINTER_REGNUM
3908 #endif
3909 && REGNO (op) != STACK_POINTER_REGNUM
3910 && subreg_offset_representable_p (REGNO (op), innermode,
3911 byte, outermode))
3913 unsigned int regno = REGNO (op);
3914 unsigned int final_regno
3915 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3917 /* ??? We do allow it if the current REG is not valid for
3918 its mode. This is a kludge to work around how float/complex
3919 arguments are passed on 32-bit SPARC and should be fixed. */
3920 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3921 || ! HARD_REGNO_MODE_OK (regno, innermode))
3923 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3925 /* Propagate original regno. We don't have any way to specify
3926 the offset inside original regno, so do so only for lowpart.
3927 The information is used only by alias analysis, which cannot
3928 grok partial registers anyway. */
3930 if (subreg_lowpart_offset (outermode, innermode) == byte)
3931 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3932 return x;
3936 /* If we have a SUBREG of a register that we are replacing and we are
3937 replacing it with a MEM, make a new MEM and try replacing the
3938 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3939 or if we would be widening it. */
3941 if (MEM_P (op)
3942 && ! mode_dependent_address_p (XEXP (op, 0))
3943 /* Allow splitting of volatile memory references in case we don't
3944 have an instruction to move the whole thing. */
3945 && (! MEM_VOLATILE_P (op)
3946 || ! have_insn_for (SET, innermode))
3947 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3948 return adjust_address_nv (op, outermode, byte);
3950 /* Handle complex values represented as CONCAT
3951 of real and imaginary part. */
3952 if (GET_CODE (op) == CONCAT)
3954 unsigned int inner_size, final_offset;
3955 rtx part, res;
3957 inner_size = GET_MODE_UNIT_SIZE (innermode);
3958 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3959 final_offset = byte % inner_size;
3960 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3961 return NULL_RTX;
3963 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3964 if (res)
3965 return res;
3966 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3967 return gen_rtx_SUBREG (outermode, part, final_offset);
3968 return NULL_RTX;
3971 /* Optimize SUBREG truncations of zero and sign extended values. */
3972 if ((GET_CODE (op) == ZERO_EXTEND
3973 || GET_CODE (op) == SIGN_EXTEND)
3974 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3976 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3978 /* If we're requesting the lowpart of a zero or sign extension,
3979 there are three possibilities. If the outermode is the same
3980 as the origmode, we can omit both the extension and the subreg.
3981 If the outermode is not larger than the origmode, we can apply
3982 the truncation without the extension. Finally, if the outermode
3983 is larger than the origmode, but both are integer modes, we
3984 can just extend to the appropriate mode. */
3985 if (bitpos == 0)
3986 {
3987 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3988 if (outermode == origmode)
3989 return XEXP (op, 0);
3990 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3991 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3992 subreg_lowpart_offset (outermode,
3993 origmode));
3994 if (SCALAR_INT_MODE_P (outermode))
3995 return simplify_gen_unary (GET_CODE (op), outermode,
3996 XEXP (op, 0), origmode);
3997 }
3999 /* A SUBREG resulting from a zero extension may fold to zero if
4000 it extracts bits entirely above the ZERO_EXTEND's source bits. */
4001 if (GET_CODE (op) == ZERO_EXTEND
4002 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4003 return CONST0_RTX (outermode);
4004 }
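/* Illustrative examples (assuming a little-endian target with 8-bit QImode,
   16-bit HImode and 32-bit SImode; register numbers are arbitrary):
     (subreg:HI (zero_extend:SI (reg:HI 60)) 0) -> (reg:HI 60)
     (subreg:QI (zero_extend:SI (reg:HI 60)) 0) -> lowpart subreg of (reg:HI 60)
     (subreg:HI (zero_extend:SI (reg:QI 60)) 2) -> (const_int 0), since only
     bits above the extended QImode value are selected.  */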
4006 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4007 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4008 the outer subreg is effectively a truncation to the original mode. */
4009 if ((GET_CODE (op) == LSHIFTRT
4010 || GET_CODE (op) == ASHIFTRT)
4011 && SCALAR_INT_MODE_P (outermode)
4012 /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
4013 to avoid the possibility that an outer LSHIFTRT shifts by more
4014 than the sign extension's sign_bit_copies and introduces zeros
4015 into the high bits of the result. */
4016 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4017 && GET_CODE (XEXP (op, 1)) == CONST_INT
4018 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4019 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4020 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4021 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4022 return simplify_gen_binary (ASHIFTRT, outermode,
4023 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
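/* Illustrative example (little-endian, 8-bit QImode, 32-bit SImode):
   (subreg:QI (ashiftrt:SI (sign_extend:SI (reg:QI 60)) (const_int 2)) 0)
   becomes (ashiftrt:QI (reg:QI 60) (const_int 2)); SImode is more than
   twice as wide as QImode, so no zero bits can reach the low byte.  */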
4025 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4026 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4027 the outer subreg is effectively a truncation to the original mode. */
4028 if ((GET_CODE (op) == LSHIFTRT
4029 || GET_CODE (op) == ASHIFTRT)
4030 && SCALAR_INT_MODE_P (outermode)
4031 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4032 && GET_CODE (XEXP (op, 1)) == CONST_INT
4033 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4034 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4035 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4036 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4037 return simplify_gen_binary (LSHIFTRT, outermode,
4038 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
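/* Illustrative example (little-endian): (subreg:QI (lshiftrt:SI
   (zero_extend:SI (reg:QI 60)) (const_int 3)) 0) becomes
   (lshiftrt:QI (reg:QI 60) (const_int 3)).  */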
4040 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4041 (ashift:QI (x:QI) C), where C is a suitable small constant and
4042 the outer subreg is effectively a truncation to the original mode. */
4043 if (GET_CODE (op) == ASHIFT
4044 && SCALAR_INT_MODE_P (outermode)
4045 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4046 && GET_CODE (XEXP (op, 1)) == CONST_INT
4047 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4048 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4049 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4050 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4051 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4052 return simplify_gen_binary (ASHIFT, outermode,
4053 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
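/* Illustrative example (little-endian): (subreg:QI (ashift:SI
   (zero_extend:SI (reg:QI 60)) (const_int 1)) 0) becomes
   (ashift:QI (reg:QI 60) (const_int 1)).  */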
4055 return NULL_RTX;
4056 }
4058 /* Make a SUBREG operation or equivalent if it folds. */
4060 rtx
4061 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4062 enum machine_mode innermode, unsigned int byte)
4063 {
4064 rtx newx;
4066 newx = simplify_subreg (outermode, op, innermode, byte);
4067 if (newx)
4068 return newx;
4070 if (GET_CODE (op) == SUBREG
4071 || GET_CODE (op) == CONCAT
4072 || GET_MODE (op) == VOIDmode)
4073 return NULL_RTX;
4075 if (validate_subreg (outermode, innermode, op, byte))
4076 return gen_rtx_SUBREG (outermode, op, byte);
4078 return NULL_RTX;
4079 }
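/* Illustrative use: a caller wanting the low SImode word of a DImode value X
   might write
     lo = simplify_gen_subreg (SImode, x, DImode,
                               subreg_lowpart_offset (SImode, DImode));
   and gets back either a fully simplified rtx, a plain (subreg:SI ...),
   or NULL_RTX if no valid subreg can be formed.  */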
4081 /* Simplify X, an rtx expression.
4083 Return the simplified expression or NULL if no simplifications
4084 were possible.
4086 This is the preferred entry point into the simplification routines;
4087 however, we still allow passes to call the more specific routines.
4089 Right now GCC has three (yes, three) major bodies of RTL simplification
4090 code that need to be unified.
4092 1. fold_rtx in cse.c. This code uses various CSE specific
4093 information to aid in RTL simplification.
4095 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4096 it uses combine specific information to aid in RTL
4097 simplification.
4099 3. The routines in this file.
4102 Long term we want to only have one body of simplification code; to
4103 get to that state I recommend the following steps:
4105 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4106 that do not depend on pass-specific state into these routines.
4108 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4109 use this routine whenever possible.
4111 3. Allow for pass dependent state to be provided to these
4112 routines and add simplifications based on the pass dependent
4113 state. Remove code from cse.c & combine.c that becomes
4114 redundant/dead.
4116 It will take time, but ultimately the compiler will be easier to
4117 maintain and improve. It's totally silly that when we add a
4118 simplification it needs to be added in 4 places (3 for RTL
4119 simplification and 1 for tree simplification). */
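/* Illustrative use: handing (plus:SI (const_int 4) (reg:SI 60)) to
   simplify_rtx canonicalizes the commutative operands via simplify_gen_binary,
   while (subreg:SI (reg:DI 60) 0) is routed to simplify_gen_subreg above.  */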
4121 rtx
4122 simplify_rtx (rtx x)
4123 {
4124 enum rtx_code code = GET_CODE (x);
4125 enum machine_mode mode = GET_MODE (x);
4127 switch (GET_RTX_CLASS (code))
4128 {
4129 case RTX_UNARY:
4130 return simplify_unary_operation (code, mode,
4131 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4132 case RTX_COMM_ARITH:
4133 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4134 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4136 /* Fall through.... */
4138 case RTX_BIN_ARITH:
4139 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4141 case RTX_TERNARY:
4142 case RTX_BITFIELD_OPS:
4143 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4144 XEXP (x, 0), XEXP (x, 1),
4145 XEXP (x, 2));
4147 case RTX_COMPARE:
4148 case RTX_COMM_COMPARE:
4149 return simplify_relational_operation (code, mode,
4150 ((GET_MODE (XEXP (x, 0))
4151 != VOIDmode)
4152 ? GET_MODE (XEXP (x, 0))
4153 : GET_MODE (XEXP (x, 1))),
4154 XEXP (x, 0),
4155 XEXP (x, 1));
4157 case RTX_EXTRA:
4158 if (code == SUBREG)
4159 return simplify_gen_subreg (mode, SUBREG_REG (x),
4160 GET_MODE (SUBREG_REG (x)),
4161 SUBREG_BYTE (x));
4162 break;
4164 case RTX_OBJ:
4165 if (code == LO_SUM)
4166 {
4167 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4168 if (GET_CODE (XEXP (x, 0)) == HIGH
4169 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4170 return XEXP (x, 1);
4171 }
4172 break;
4174 default:
4175 break;
4176 }
4177 return NULL;