1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
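/* Illustrative example (added for exposition; not part of the original
   source): HWI_SIGN_EXTEND (5) evaluates to (HOST_WIDE_INT) 0 and
   HWI_SIGN_EXTEND (-1) evaluates to (HOST_WIDE_INT) -1; the high half of
   a (low, high) pair is simply the sign bit of LOW replicated across a
   full HOST_WIDE_INT.  */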
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
72 return gen_int_mode (- INTVAL (i), mode);
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
101 else
102 return false;
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
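/* Illustrative example (added for exposition; not part of the original
   source): mode_signbit_p (QImode, GEN_INT (-128)) is true, because
   CONST_INTs are stored sign-extended and the masking above reduces -128
   to 0x80, the QImode sign bit; mode_signbit_p (QImode, GEN_INT (1)) is
   false.  For SImode the accepted value is 0x80000000.  */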
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
116 rtx tem;
118 /* Put complex operands first and constants second if commutative. */
119 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
120 && swap_commutative_operands_p (op0, op1))
121 tem = op0, op0 = op1, op1 = tem;
123 /* If this simplifies, do it. */
124 tem = simplify_binary_operation (code, mode, op0, op1);
125 if (tem)
126 return tem;
128 /* Handle addition and subtraction specially. Otherwise, just form
129 the operation. */
131 if (code == PLUS || code == MINUS)
133 tem = simplify_plus_minus (code, mode, op0, op1, 1);
134 if (tem)
135 return tem;
138 return gen_rtx_fmt_ee (code, mode, op0, op1);
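/* Usage sketch (illustrative, not part of the original source; `some_reg'
   stands for any non-constant SImode rtx):

     rtx t1 = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
     rtx t2 = simplify_gen_binary (PLUS, SImode, GEN_INT (2), some_reg);

   T1 is (const_int 5), the folded sum.  T2 cannot be folded, so a
   (plus:SI some_reg (const_int 2)) expression is built, with the constant
   canonicalized into the second operand.  */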
141 /* If X is a MEM referencing the constant pool, return the real value.
142 Otherwise return X. */
144 avoid_constant_pool_reference (rtx x)
146 rtx c, tmp, addr;
147 enum machine_mode cmode;
149 switch (GET_CODE (x))
151 case MEM:
152 break;
154 case FLOAT_EXTEND:
155 /* Handle float extensions of constant pool references. */
156 tmp = XEXP (x, 0);
157 c = avoid_constant_pool_reference (tmp);
158 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
160 REAL_VALUE_TYPE d;
162 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
163 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
165 return x;
167 default:
168 return x;
171 addr = XEXP (x, 0);
173 /* Call target hook to avoid the effects of -fpic etc.... */
174 addr = targetm.delegitimize_address (addr);
176 if (GET_CODE (addr) == LO_SUM)
177 addr = XEXP (addr, 1);
179 if (GET_CODE (addr) != SYMBOL_REF
180 || ! CONSTANT_POOL_ADDRESS_P (addr))
181 return x;
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (cmode != GET_MODE (x))
191 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
192 return c ? c : x;
195 return c;
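/* Illustrative example (added for exposition; not part of the original
   source): if X is a (mem:DF ...) whose address is a SYMBOL_REF into the
   constant pool at a DFmode entry holding 1.0, the function returns the
   pooled (const_double ... 1.0) instead of the MEM; any rtx that is not
   such a constant pool reference is returned unchanged.  */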
198 /* Make a unary operation by first seeing if it folds and otherwise making
199 the specified operation. */
202 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
203 enum machine_mode op_mode)
205 rtx tem;
207 /* If this simplifies, use it. */
208 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
209 return tem;
211 return gen_rtx_fmt_e (code, mode, op);
214 /* Likewise for ternary operations. */
217 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
218 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
220 rtx tem;
222 /* If this simplifies, use it. */
223 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
224 op0, op1, op2)))
225 return tem;
227 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
230 /* Likewise, for relational operations.
231 CMP_MODE specifies mode comparison is done in. */
234 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
235 enum machine_mode cmp_mode, rtx op0, rtx op1)
237 rtx tem;
239 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
240 op0, op1)))
241 return tem;
243 return gen_rtx_fmt_ee (code, mode, op0, op1);
246 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
247 resulting RTX. Return a new RTX which is as simplified as possible. */
250 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
252 enum rtx_code code = GET_CODE (x);
253 enum machine_mode mode = GET_MODE (x);
254 enum machine_mode op_mode;
255 rtx op0, op1, op2;
257 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
258 to build a new expression substituting recursively. If we can't do
259 anything, return our input. */
261 if (x == old_rtx)
262 return new_rtx;
264 switch (GET_RTX_CLASS (code))
266 case RTX_UNARY:
267 op0 = XEXP (x, 0);
268 op_mode = GET_MODE (op0);
269 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
270 if (op0 == XEXP (x, 0))
271 return x;
272 return simplify_gen_unary (code, mode, op0, op_mode);
274 case RTX_BIN_ARITH:
275 case RTX_COMM_ARITH:
276 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
277 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
278 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
279 return x;
280 return simplify_gen_binary (code, mode, op0, op1);
282 case RTX_COMPARE:
283 case RTX_COMM_COMPARE:
284 op0 = XEXP (x, 0);
285 op1 = XEXP (x, 1);
286 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
287 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
288 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
289 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
290 return x;
291 return simplify_gen_relational (code, mode, op_mode, op0, op1);
293 case RTX_TERNARY:
294 case RTX_BITFIELD_OPS:
295 op0 = XEXP (x, 0);
296 op_mode = GET_MODE (op0);
297 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
298 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
299 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
300 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
301 return x;
302 if (op_mode == VOIDmode)
303 op_mode = GET_MODE (op0);
304 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
306 case RTX_EXTRA:
307 /* The only case we try to handle is a SUBREG. */
308 if (code == SUBREG)
310 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
311 if (op0 == SUBREG_REG (x))
312 return x;
313 op0 = simplify_gen_subreg (GET_MODE (x), op0,
314 GET_MODE (SUBREG_REG (x)),
315 SUBREG_BYTE (x));
316 return op0 ? op0 : x;
318 break;
320 case RTX_OBJ:
321 if (code == MEM)
323 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
324 if (op0 == XEXP (x, 0))
325 return x;
326 return replace_equiv_address_nv (x, op0);
328 else if (code == LO_SUM)
330 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
331 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
333 /* (lo_sum (high x) x) -> x */
334 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
335 return op1;
337 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
338 return x;
339 return gen_rtx_LO_SUM (mode, op0, op1);
341 else if (code == REG)
343 if (rtx_equal_p (x, old_rtx))
344 return new_rtx;
346 break;
348 default:
349 break;
351 return x;
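/* Usage sketch (illustrative, not part of the original source): with
   X = (plus:SI (reg:SI 60) (const_int 4)), OLD_RTX = (reg:SI 60) and
   NEW_RTX = (const_int 1), the substitution rebuilds the PLUS through
   simplify_gen_binary, which folds the result to (const_int 5).  */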
354 /* Try to simplify a unary operation CODE whose output mode is to be
355 MODE with input operand OP whose mode was originally OP_MODE.
356 Return zero if no simplification can be made. */
358 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
359 rtx op, enum machine_mode op_mode)
361 rtx trueop, tem;
363 if (GET_CODE (op) == CONST)
364 op = XEXP (op, 0);
366 trueop = avoid_constant_pool_reference (op);
368 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
369 if (tem)
370 return tem;
372 return simplify_unary_operation_1 (code, mode, op);
375 /* Perform some simplifications we can do even if the operands
376 aren't constant. */
377 static rtx
378 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
380 enum rtx_code reversed;
381 rtx temp;
383 switch (code)
385 case NOT:
386 /* (not (not X)) == X. */
387 if (GET_CODE (op) == NOT)
388 return XEXP (op, 0);
390 /* (not (eq X Y)) == (ne X Y), etc. */
391 if (COMPARISON_P (op)
392 && (mode == BImode || STORE_FLAG_VALUE == -1)
393 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
394 return simplify_gen_relational (reversed, mode, VOIDmode,
395 XEXP (op, 0), XEXP (op, 1));
397 /* (not (plus X -1)) can become (neg X). */
398 if (GET_CODE (op) == PLUS
399 && XEXP (op, 1) == constm1_rtx)
400 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
402 /* Similarly, (not (neg X)) is (plus X -1). */
403 if (GET_CODE (op) == NEG)
404 return plus_constant (XEXP (op, 0), -1);
406 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
407 if (GET_CODE (op) == XOR
408 && GET_CODE (XEXP (op, 1)) == CONST_INT
409 && (temp = simplify_unary_operation (NOT, mode,
410 XEXP (op, 1), mode)) != 0)
411 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
413 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
414 if (GET_CODE (op) == PLUS
415 && GET_CODE (XEXP (op, 1)) == CONST_INT
416 && mode_signbit_p (mode, XEXP (op, 1))
417 && (temp = simplify_unary_operation (NOT, mode,
418 XEXP (op, 1), mode)) != 0)
419 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
422 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
423 operands other than 1, but that is not valid. We could do a
424 similar simplification for (not (lshiftrt C X)) where C is
425 just the sign bit, but this doesn't seem common enough to
426 bother with. */
427 if (GET_CODE (op) == ASHIFT
428 && XEXP (op, 0) == const1_rtx)
430 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
431 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
434 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
435 by reversing the comparison code if valid. */
436 if (STORE_FLAG_VALUE == -1
437 && COMPARISON_P (op)
438 && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
439 return simplify_gen_relational (reversed, mode, VOIDmode,
440 XEXP (op, 0), XEXP (op, 1));
442 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
443 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
444 so we can perform the above simplification. */
446 if (STORE_FLAG_VALUE == -1
447 && GET_CODE (op) == ASHIFTRT
448 && GET_CODE (XEXP (op, 1)) == CONST_INT
449 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
450 return simplify_gen_relational (GE, mode, VOIDmode,
451 XEXP (op, 0), const0_rtx);
453 break;
455 case NEG:
456 /* (neg (neg X)) == X. */
457 if (GET_CODE (op) == NEG)
458 return XEXP (op, 0);
460 /* (neg (plus X 1)) can become (not X). */
461 if (GET_CODE (op) == PLUS
462 && XEXP (op, 1) == const1_rtx)
463 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
465 /* Similarly, (neg (not X)) is (plus X 1). */
466 if (GET_CODE (op) == NOT)
467 return plus_constant (XEXP (op, 0), 1);
469 /* (neg (minus X Y)) can become (minus Y X). This transformation
470 isn't safe for modes with signed zeros, since if X and Y are
471 both +0, (minus Y X) is the same as (minus X Y). If the
472 rounding mode is towards +infinity (or -infinity) then the two
473 expressions will be rounded differently. */
474 if (GET_CODE (op) == MINUS
475 && !HONOR_SIGNED_ZEROS (mode)
476 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
477 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
479 if (GET_CODE (op) == PLUS
480 && !HONOR_SIGNED_ZEROS (mode)
481 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
483 /* (neg (plus A C)) is simplified to (minus -C A). */
484 if (GET_CODE (XEXP (op, 1)) == CONST_INT
485 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
487 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
488 if (temp)
489 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
492 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
493 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
494 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
497 /* (neg (mult A B)) becomes (mult (neg A) B).
498 This works even for floating-point values. */
499 if (GET_CODE (op) == MULT
500 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
502 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
503 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
506 /* NEG commutes with ASHIFT since it is multiplication. Only do
507 this if we can then eliminate the NEG (e.g., if the operand
508 is a constant). */
509 if (GET_CODE (op) == ASHIFT)
511 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
512 if (temp)
513 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
516 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
517 C is equal to the width of MODE minus 1. */
518 if (GET_CODE (op) == ASHIFTRT
519 && GET_CODE (XEXP (op, 1)) == CONST_INT
520 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
521 return simplify_gen_binary (LSHIFTRT, mode,
522 XEXP (op, 0), XEXP (op, 1));
524 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
525 C is equal to the width of MODE minus 1. */
526 if (GET_CODE (op) == LSHIFTRT
527 && GET_CODE (XEXP (op, 1)) == CONST_INT
528 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
529 return simplify_gen_binary (ASHIFTRT, mode,
530 XEXP (op, 0), XEXP (op, 1));
532 break;
534 case SIGN_EXTEND:
535 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
536 becomes just the MINUS if its mode is MODE. This allows
537 folding switch statements on machines using casesi (such as
538 the VAX). */
539 if (GET_CODE (op) == TRUNCATE
540 && GET_MODE (XEXP (op, 0)) == mode
541 && GET_CODE (XEXP (op, 0)) == MINUS
542 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
543 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
544 return XEXP (op, 0);
546 /* Check for a sign extension of a subreg of a promoted
547 variable, where the promotion is sign-extended, and the
548 target mode is the same as the variable's promotion. */
549 if (GET_CODE (op) == SUBREG
550 && SUBREG_PROMOTED_VAR_P (op)
551 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
552 && GET_MODE (XEXP (op, 0)) == mode)
553 return XEXP (op, 0);
555 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
556 if (! POINTERS_EXTEND_UNSIGNED
557 && mode == Pmode && GET_MODE (op) == ptr_mode
558 && (CONSTANT_P (op)
559 || (GET_CODE (op) == SUBREG
560 && REG_P (SUBREG_REG (op))
561 && REG_POINTER (SUBREG_REG (op))
562 && GET_MODE (SUBREG_REG (op)) == Pmode)))
563 return convert_memory_address (Pmode, op);
564 #endif
565 break;
567 case ZERO_EXTEND:
568 /* Check for a zero extension of a subreg of a promoted
569 variable, where the promotion is zero-extended, and the
570 target mode is the same as the variable's promotion. */
571 if (GET_CODE (op) == SUBREG
572 && SUBREG_PROMOTED_VAR_P (op)
573 && SUBREG_PROMOTED_UNSIGNED_P (op)
574 && GET_MODE (XEXP (op, 0)) == mode)
575 return XEXP (op, 0);
577 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
578 if (POINTERS_EXTEND_UNSIGNED > 0
579 && mode == Pmode && GET_MODE (op) == ptr_mode
580 && (CONSTANT_P (op)
581 || (GET_CODE (op) == SUBREG
582 && REG_P (SUBREG_REG (op))
583 && REG_POINTER (SUBREG_REG (op))
584 && GET_MODE (SUBREG_REG (op)) == Pmode)))
585 return convert_memory_address (Pmode, op);
586 #endif
587 break;
589 default:
590 break;
593 return 0;
596 /* Try to compute the value of a unary operation CODE whose output mode is to
597 be MODE with input operand OP whose mode was originally OP_MODE.
598 Return zero if the value cannot be computed. */
600 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
601 rtx op, enum machine_mode op_mode)
603 unsigned int width = GET_MODE_BITSIZE (mode);
605 if (code == VEC_DUPLICATE)
607 gcc_assert (VECTOR_MODE_P (mode));
608 if (GET_MODE (op) != VOIDmode)
610 if (!VECTOR_MODE_P (GET_MODE (op)))
611 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
612 else
613 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
614 (GET_MODE (op)));
616 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
617 || GET_CODE (op) == CONST_VECTOR)
619 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
620 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
621 rtvec v = rtvec_alloc (n_elts);
622 unsigned int i;
624 if (GET_CODE (op) != CONST_VECTOR)
625 for (i = 0; i < n_elts; i++)
626 RTVEC_ELT (v, i) = op;
627 else
629 enum machine_mode inmode = GET_MODE (op);
630 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
631 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
633 gcc_assert (in_n_elts < n_elts);
634 gcc_assert ((n_elts % in_n_elts) == 0);
635 for (i = 0; i < n_elts; i++)
636 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
638 return gen_rtx_CONST_VECTOR (mode, v);
642 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
644 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
645 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
646 enum machine_mode opmode = GET_MODE (op);
647 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
648 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
649 rtvec v = rtvec_alloc (n_elts);
650 unsigned int i;
652 gcc_assert (op_n_elts == n_elts);
653 for (i = 0; i < n_elts; i++)
655 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
656 CONST_VECTOR_ELT (op, i),
657 GET_MODE_INNER (opmode));
658 if (!x)
659 return 0;
660 RTVEC_ELT (v, i) = x;
662 return gen_rtx_CONST_VECTOR (mode, v);
665 /* The order of these tests is critical so that, for example, we don't
666 check the wrong mode (input vs. output) for a conversion operation,
667 such as FIX. At some point, this should be simplified. */
669 if (code == FLOAT && GET_MODE (op) == VOIDmode
670 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
672 HOST_WIDE_INT hv, lv;
673 REAL_VALUE_TYPE d;
675 if (GET_CODE (op) == CONST_INT)
676 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
677 else
678 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
680 REAL_VALUE_FROM_INT (d, lv, hv, mode);
681 d = real_value_truncate (mode, d);
682 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
684 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
685 && (GET_CODE (op) == CONST_DOUBLE
686 || GET_CODE (op) == CONST_INT))
688 HOST_WIDE_INT hv, lv;
689 REAL_VALUE_TYPE d;
691 if (GET_CODE (op) == CONST_INT)
692 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
693 else
694 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
696 if (op_mode == VOIDmode)
698 /* We don't know how to interpret negative-looking numbers in
699 this case, so don't try to fold those. */
700 if (hv < 0)
701 return 0;
703 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
705 else
706 hv = 0, lv &= GET_MODE_MASK (op_mode);
708 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
709 d = real_value_truncate (mode, d);
710 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
713 if (GET_CODE (op) == CONST_INT
714 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
716 HOST_WIDE_INT arg0 = INTVAL (op);
717 HOST_WIDE_INT val;
719 switch (code)
721 case NOT:
722 val = ~ arg0;
723 break;
725 case NEG:
726 val = - arg0;
727 break;
729 case ABS:
730 val = (arg0 >= 0 ? arg0 : - arg0);
731 break;
733 case FFS:
734 /* Don't use ffs here. Instead, get low order bit and then its
735 number. If arg0 is zero, this will return 0, as desired. */
736 arg0 &= GET_MODE_MASK (mode);
737 val = exact_log2 (arg0 & (- arg0)) + 1;
738 break;
740 case CLZ:
741 arg0 &= GET_MODE_MASK (mode);
742 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
744 else
745 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
746 break;
748 case CTZ:
749 arg0 &= GET_MODE_MASK (mode);
750 if (arg0 == 0)
752 /* Even if the value at zero is undefined, we have to come
753 up with some replacement. Seems good enough. */
754 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
755 val = GET_MODE_BITSIZE (mode);
757 else
758 val = exact_log2 (arg0 & -arg0);
759 break;
761 case POPCOUNT:
762 arg0 &= GET_MODE_MASK (mode);
763 val = 0;
764 while (arg0)
765 val++, arg0 &= arg0 - 1;
766 break;
768 case PARITY:
769 arg0 &= GET_MODE_MASK (mode);
770 val = 0;
771 while (arg0)
772 val++, arg0 &= arg0 - 1;
773 val &= 1;
774 break;
776 case TRUNCATE:
777 val = arg0;
778 break;
780 case ZERO_EXTEND:
781 /* When zero-extending a CONST_INT, we need to know its
782 original mode. */
783 gcc_assert (op_mode != VOIDmode);
784 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
786 /* If we were really extending the mode,
787 we would have to distinguish between zero-extension
788 and sign-extension. */
789 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
790 val = arg0;
792 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
793 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
794 else
795 return 0;
796 break;
798 case SIGN_EXTEND:
799 if (op_mode == VOIDmode)
800 op_mode = mode;
801 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
803 /* If we were really extending the mode,
804 we would have to distinguish between zero-extension
805 and sign-extension. */
806 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
807 val = arg0;
 809 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
 812 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
813 if (val
814 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
815 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
817 else
818 return 0;
819 break;
821 case SQRT:
822 case FLOAT_EXTEND:
823 case FLOAT_TRUNCATE:
824 case SS_TRUNCATE:
825 case US_TRUNCATE:
826 return 0;
828 default:
829 gcc_unreachable ();
832 val = trunc_int_for_mode (val, mode);
834 return GEN_INT (val);
837 /* We can do some operations on integer CONST_DOUBLEs. Also allow
838 for a DImode operation on a CONST_INT. */
839 else if (GET_MODE (op) == VOIDmode
840 && width <= HOST_BITS_PER_WIDE_INT * 2
841 && (GET_CODE (op) == CONST_DOUBLE
842 || GET_CODE (op) == CONST_INT))
844 unsigned HOST_WIDE_INT l1, lv;
845 HOST_WIDE_INT h1, hv;
847 if (GET_CODE (op) == CONST_DOUBLE)
848 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
849 else
850 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
852 switch (code)
854 case NOT:
855 lv = ~ l1;
856 hv = ~ h1;
857 break;
859 case NEG:
860 neg_double (l1, h1, &lv, &hv);
861 break;
863 case ABS:
864 if (h1 < 0)
865 neg_double (l1, h1, &lv, &hv);
866 else
867 lv = l1, hv = h1;
868 break;
870 case FFS:
871 hv = 0;
872 if (l1 == 0)
874 if (h1 == 0)
875 lv = 0;
876 else
877 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
879 else
880 lv = exact_log2 (l1 & -l1) + 1;
881 break;
883 case CLZ:
884 hv = 0;
885 if (h1 != 0)
886 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
887 - HOST_BITS_PER_WIDE_INT;
888 else if (l1 != 0)
889 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
890 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
891 lv = GET_MODE_BITSIZE (mode);
892 break;
894 case CTZ:
895 hv = 0;
896 if (l1 != 0)
897 lv = exact_log2 (l1 & -l1);
898 else if (h1 != 0)
899 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
900 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
901 lv = GET_MODE_BITSIZE (mode);
902 break;
904 case POPCOUNT:
905 hv = 0;
906 lv = 0;
907 while (l1)
908 lv++, l1 &= l1 - 1;
909 while (h1)
910 lv++, h1 &= h1 - 1;
911 break;
913 case PARITY:
914 hv = 0;
915 lv = 0;
916 while (l1)
917 lv++, l1 &= l1 - 1;
918 while (h1)
919 lv++, h1 &= h1 - 1;
920 lv &= 1;
921 break;
923 case TRUNCATE:
924 /* This is just a change-of-mode, so do nothing. */
925 lv = l1, hv = h1;
926 break;
928 case ZERO_EXTEND:
929 gcc_assert (op_mode != VOIDmode);
931 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
932 return 0;
934 hv = 0;
935 lv = l1 & GET_MODE_MASK (op_mode);
936 break;
938 case SIGN_EXTEND:
939 if (op_mode == VOIDmode
940 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
941 return 0;
942 else
944 lv = l1 & GET_MODE_MASK (op_mode);
945 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
946 && (lv & ((HOST_WIDE_INT) 1
947 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
948 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
950 hv = HWI_SIGN_EXTEND (lv);
952 break;
954 case SQRT:
955 return 0;
957 default:
958 return 0;
961 return immed_double_const (lv, hv, mode);
964 else if (GET_CODE (op) == CONST_DOUBLE
965 && GET_MODE_CLASS (mode) == MODE_FLOAT)
967 REAL_VALUE_TYPE d, t;
968 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
970 switch (code)
972 case SQRT:
973 if (HONOR_SNANS (mode) && real_isnan (&d))
974 return 0;
975 real_sqrt (&t, mode, &d);
976 d = t;
977 break;
978 case ABS:
979 d = REAL_VALUE_ABS (d);
980 break;
981 case NEG:
982 d = REAL_VALUE_NEGATE (d);
983 break;
984 case FLOAT_TRUNCATE:
985 d = real_value_truncate (mode, d);
986 break;
987 case FLOAT_EXTEND:
988 /* All this does is change the mode. */
989 break;
990 case FIX:
991 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
992 break;
993 case NOT:
995 long tmp[4];
996 int i;
998 real_to_target (tmp, &d, GET_MODE (op));
999 for (i = 0; i < 4; i++)
1000 tmp[i] = ~tmp[i];
1001 real_from_target (&d, tmp, mode);
1002 break;
1004 default:
1005 gcc_unreachable ();
1007 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1010 else if (GET_CODE (op) == CONST_DOUBLE
1011 && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
1012 && GET_MODE_CLASS (mode) == MODE_INT
1013 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1015 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1016 operators are intentionally left unspecified (to ease implementation
1017 by target backends), for consistency, this routine implements the
1018 same semantics for constant folding as used by the middle-end. */
1020 /* This was formerly used only for non-IEEE float.
1021 eggert@twinsun.com says it is safe for IEEE also. */
1022 HOST_WIDE_INT xh, xl, th, tl;
1023 REAL_VALUE_TYPE x, t;
1024 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1025 switch (code)
1027 case FIX:
1028 if (REAL_VALUE_ISNAN (x))
1029 return const0_rtx;
1031 /* Test against the signed upper bound. */
 1032 if (width > HOST_BITS_PER_WIDE_INT)
1034 th = ((unsigned HOST_WIDE_INT) 1
1035 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1036 tl = -1;
1038 else
1040 th = 0;
1041 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1043 real_from_integer (&t, VOIDmode, tl, th, 0);
1044 if (REAL_VALUES_LESS (t, x))
1046 xh = th;
1047 xl = tl;
1048 break;
1051 /* Test against the signed lower bound. */
 1052 if (width > HOST_BITS_PER_WIDE_INT)
1054 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1055 tl = 0;
1057 else
1059 th = -1;
1060 tl = (HOST_WIDE_INT) -1 << (width - 1);
1062 real_from_integer (&t, VOIDmode, tl, th, 0);
1063 if (REAL_VALUES_LESS (x, t))
1065 xh = th;
1066 xl = tl;
1067 break;
1069 REAL_VALUE_TO_INT (&xl, &xh, x);
1070 break;
1072 case UNSIGNED_FIX:
1073 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1074 return const0_rtx;
1076 /* Test against the unsigned upper bound. */
 1077 if (width == 2*HOST_BITS_PER_WIDE_INT)
1079 th = -1;
1080 tl = -1;
 1082 else if (width >= HOST_BITS_PER_WIDE_INT)
1084 th = ((unsigned HOST_WIDE_INT) 1
1085 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1086 tl = -1;
1088 else
1090 th = 0;
1091 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1093 real_from_integer (&t, VOIDmode, tl, th, 1);
1094 if (REAL_VALUES_LESS (t, x))
1096 xh = th;
1097 xl = tl;
1098 break;
1101 REAL_VALUE_TO_INT (&xl, &xh, x);
1102 break;
1104 default:
1105 gcc_unreachable ();
1107 return immed_double_const (xl, xh, mode);
1110 return NULL_RTX;
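/* Illustrative examples (added for exposition; not part of the original
   source): simplify_const_unary_operation (NEG, SImode, GEN_INT (7), SImode)
   yields (const_int -7), and simplify_const_unary_operation (ZERO_EXTEND,
   SImode, GEN_INT (-1), QImode) yields (const_int 255), the low eight bits
   of the operand.  A non-constant OP makes the function return 0.  */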
1113 /* Subroutine of simplify_binary_operation to simplify a commutative,
1114 associative binary operation CODE with result mode MODE, operating
1115 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1116 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1117 canonicalization is possible. */
1119 static rtx
1120 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1121 rtx op0, rtx op1)
1123 rtx tem;
1125 /* Linearize the operator to the left. */
1126 if (GET_CODE (op1) == code)
1128 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1129 if (GET_CODE (op0) == code)
1131 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1132 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1135 /* "a op (b op c)" becomes "(b op c) op a". */
1136 if (! swap_commutative_operands_p (op1, op0))
1137 return simplify_gen_binary (code, mode, op1, op0);
1139 tem = op0;
1140 op0 = op1;
1141 op1 = tem;
1144 if (GET_CODE (op0) == code)
1146 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1147 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1149 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1150 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1153 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1154 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1155 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1156 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1157 if (tem != 0)
1158 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1160 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1161 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1162 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1163 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1164 if (tem != 0)
1165 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1168 return 0;
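/* Illustrative example (added for exposition; not part of the original
   source): for CODE = PLUS, OP0 = (plus:SI (reg:SI 60) (const_int 1)) and
   OP1 = (const_int 2), the "(a op b) op c" -> "a op (b op c)" attempt above
   folds the two constants and the result is
   (plus:SI (reg:SI 60) (const_int 3)).  */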
1172 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1173 and OP1. Return 0 if no simplification is possible.
1175 Don't use this for relational operations such as EQ or LT.
1176 Use simplify_relational_operation instead. */
1178 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1179 rtx op0, rtx op1)
1181 rtx trueop0, trueop1;
1182 rtx tem;
1184 /* Relational operations don't work here. We must know the mode
1185 of the operands in order to do the comparison correctly.
1186 Assuming a full word can give incorrect results.
1187 Consider comparing 128 with -128 in QImode. */
1188 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1189 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1191 /* Make sure the constant is second. */
1192 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1193 && swap_commutative_operands_p (op0, op1))
1195 tem = op0, op0 = op1, op1 = tem;
1198 trueop0 = avoid_constant_pool_reference (op0);
1199 trueop1 = avoid_constant_pool_reference (op1);
1201 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1202 if (tem)
1203 return tem;
1204 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1207 static rtx
1208 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1209 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1211 rtx tem;
1212 HOST_WIDE_INT val;
1213 unsigned int width = GET_MODE_BITSIZE (mode);
1215 /* Even if we can't compute a constant result,
1216 there are some cases worth simplifying. */
1218 switch (code)
1220 case PLUS:
1221 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1222 when x is NaN, infinite, or finite and nonzero. They aren't
1223 when x is -0 and the rounding mode is not towards -infinity,
1224 since (-0) + 0 is then 0. */
1225 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1226 return op0;
1228 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1229 transformations are safe even for IEEE. */
1230 if (GET_CODE (op0) == NEG)
1231 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1232 else if (GET_CODE (op1) == NEG)
1233 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1235 /* (~a) + 1 -> -a */
1236 if (INTEGRAL_MODE_P (mode)
1237 && GET_CODE (op0) == NOT
1238 && trueop1 == const1_rtx)
1239 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1241 /* Handle both-operands-constant cases. We can only add
1242 CONST_INTs to constants since the sum of relocatable symbols
1243 can't be handled by most assemblers. Don't add CONST_INT
1244 to CONST_INT since overflow won't be computed properly if wider
1245 than HOST_BITS_PER_WIDE_INT. */
1247 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1248 && GET_CODE (op1) == CONST_INT)
1249 return plus_constant (op0, INTVAL (op1));
1250 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1251 && GET_CODE (op0) == CONST_INT)
1252 return plus_constant (op1, INTVAL (op0));
1254 /* See if this is something like X * C - X or vice versa or
1255 if the multiplication is written as a shift. If so, we can
1256 distribute and make a new multiply, shift, or maybe just
1257 have X (if C is 2 in the example above). But don't make
1258 something more expensive than we had before. */
1260 if (! FLOAT_MODE_P (mode))
1262 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1263 rtx lhs = op0, rhs = op1;
1265 if (GET_CODE (lhs) == NEG)
1266 coeff0 = -1, lhs = XEXP (lhs, 0);
1267 else if (GET_CODE (lhs) == MULT
1268 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1269 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1270 else if (GET_CODE (lhs) == ASHIFT
1271 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1272 && INTVAL (XEXP (lhs, 1)) >= 0
1273 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1275 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1276 lhs = XEXP (lhs, 0);
1279 if (GET_CODE (rhs) == NEG)
1280 coeff1 = -1, rhs = XEXP (rhs, 0);
1281 else if (GET_CODE (rhs) == MULT
1282 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1284 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1286 else if (GET_CODE (rhs) == ASHIFT
1287 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1288 && INTVAL (XEXP (rhs, 1)) >= 0
1289 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1291 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1292 rhs = XEXP (rhs, 0);
1295 if (rtx_equal_p (lhs, rhs))
1297 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1298 tem = simplify_gen_binary (MULT, mode, lhs,
1299 GEN_INT (coeff0 + coeff1));
1300 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1301 ? tem : 0;
1305 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1306 if ((GET_CODE (op1) == CONST_INT
1307 || GET_CODE (op1) == CONST_DOUBLE)
1308 && GET_CODE (op0) == XOR
1309 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1310 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1311 && mode_signbit_p (mode, op1))
1312 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1313 simplify_gen_binary (XOR, mode, op1,
1314 XEXP (op0, 1)));
1316 /* If one of the operands is a PLUS or a MINUS, see if we can
1317 simplify this by the associative law.
1318 Don't use the associative law for floating point.
1319 The inaccuracy makes it nonassociative,
1320 and subtle programs can break if operations are associated. */
1322 if (INTEGRAL_MODE_P (mode)
1323 && (plus_minus_operand_p (op0)
1324 || plus_minus_operand_p (op1))
1325 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1326 return tem;
1328 /* Reassociate floating point addition only when the user
1329 specifies unsafe math optimizations. */
1330 if (FLOAT_MODE_P (mode)
1331 && flag_unsafe_math_optimizations)
1333 tem = simplify_associative_operation (code, mode, op0, op1);
1334 if (tem)
1335 return tem;
1337 break;
1339 case COMPARE:
1340 #ifdef HAVE_cc0
1341 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1342 using cc0, in which case we want to leave it as a COMPARE
1343 so we can distinguish it from a register-register-copy.
1345 In IEEE floating point, x-0 is not the same as x. */
1347 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1348 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1349 && trueop1 == CONST0_RTX (mode))
1350 return op0;
1351 #endif
1353 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1354 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1355 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1356 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1358 rtx xop00 = XEXP (op0, 0);
1359 rtx xop10 = XEXP (op1, 0);
1361 #ifdef HAVE_cc0
1362 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1363 #else
1364 if (REG_P (xop00) && REG_P (xop10)
1365 && GET_MODE (xop00) == GET_MODE (xop10)
1366 && REGNO (xop00) == REGNO (xop10)
1367 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1368 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1369 #endif
1370 return xop00;
1372 break;
1374 case MINUS:
1375 /* We can't assume x-x is 0 even with non-IEEE floating point,
1376 but since it is zero except in very strange circumstances, we
1377 will treat it as zero with -funsafe-math-optimizations. */
1378 if (rtx_equal_p (trueop0, trueop1)
1379 && ! side_effects_p (op0)
1380 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1381 return CONST0_RTX (mode);
1383 /* Change subtraction from zero into negation. (0 - x) is the
1384 same as -x when x is NaN, infinite, or finite and nonzero.
1385 But if the mode has signed zeros, and does not round towards
1386 -infinity, then 0 - 0 is 0, not -0. */
1387 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1388 return simplify_gen_unary (NEG, mode, op1, mode);
1390 /* (-1 - a) is ~a. */
1391 if (trueop0 == constm1_rtx)
1392 return simplify_gen_unary (NOT, mode, op1, mode);
1394 /* Subtracting 0 has no effect unless the mode has signed zeros
1395 and supports rounding towards -infinity. In such a case,
1396 0 - 0 is -0. */
1397 if (!(HONOR_SIGNED_ZEROS (mode)
1398 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1399 && trueop1 == CONST0_RTX (mode))
1400 return op0;
1402 /* See if this is something like X * C - X or vice versa or
1403 if the multiplication is written as a shift. If so, we can
1404 distribute and make a new multiply, shift, or maybe just
1405 have X (if C is 2 in the example above). But don't make
1406 something more expensive than we had before. */
1408 if (! FLOAT_MODE_P (mode))
1410 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1411 rtx lhs = op0, rhs = op1;
1413 if (GET_CODE (lhs) == NEG)
1414 coeff0 = -1, lhs = XEXP (lhs, 0);
1415 else if (GET_CODE (lhs) == MULT
1416 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1418 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1420 else if (GET_CODE (lhs) == ASHIFT
1421 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1422 && INTVAL (XEXP (lhs, 1)) >= 0
1423 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1425 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1426 lhs = XEXP (lhs, 0);
1429 if (GET_CODE (rhs) == NEG)
1430 coeff1 = - 1, rhs = XEXP (rhs, 0);
1431 else if (GET_CODE (rhs) == MULT
1432 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1434 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1436 else if (GET_CODE (rhs) == ASHIFT
1437 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1438 && INTVAL (XEXP (rhs, 1)) >= 0
1439 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1441 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1442 rhs = XEXP (rhs, 0);
1445 if (rtx_equal_p (lhs, rhs))
1447 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1448 tem = simplify_gen_binary (MULT, mode, lhs,
1449 GEN_INT (coeff0 - coeff1));
1450 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1451 ? tem : 0;
1455 /* (a - (-b)) -> (a + b). True even for IEEE. */
1456 if (GET_CODE (op1) == NEG)
1457 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1459 /* (-x - c) may be simplified as (-c - x). */
1460 if (GET_CODE (op0) == NEG
1461 && (GET_CODE (op1) == CONST_INT
1462 || GET_CODE (op1) == CONST_DOUBLE))
1464 tem = simplify_unary_operation (NEG, mode, op1, mode);
1465 if (tem)
1466 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1469 /* If one of the operands is a PLUS or a MINUS, see if we can
1470 simplify this by the associative law.
1471 Don't use the associative law for floating point.
1472 The inaccuracy makes it nonassociative,
1473 and subtle programs can break if operations are associated. */
1475 if (INTEGRAL_MODE_P (mode)
1476 && (plus_minus_operand_p (op0)
1477 || plus_minus_operand_p (op1))
1478 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1479 return tem;
1481 /* Don't let a relocatable value get a negative coeff. */
1482 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1483 return simplify_gen_binary (PLUS, mode,
1484 op0,
1485 neg_const_int (mode, op1));
1487 /* (x - (x & y)) -> (x & ~y) */
1488 if (GET_CODE (op1) == AND)
1490 if (rtx_equal_p (op0, XEXP (op1, 0)))
1492 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1493 GET_MODE (XEXP (op1, 1)));
1494 return simplify_gen_binary (AND, mode, op0, tem);
1496 if (rtx_equal_p (op0, XEXP (op1, 1)))
1498 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1499 GET_MODE (XEXP (op1, 0)));
1500 return simplify_gen_binary (AND, mode, op0, tem);
1503 break;
1505 case MULT:
1506 if (trueop1 == constm1_rtx)
1507 return simplify_gen_unary (NEG, mode, op0, mode);
1509 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1510 x is NaN, since x * 0 is then also NaN. Nor is it valid
1511 when the mode has signed zeros, since multiplying a negative
1512 number by 0 will give -0, not 0. */
1513 if (!HONOR_NANS (mode)
1514 && !HONOR_SIGNED_ZEROS (mode)
1515 && trueop1 == CONST0_RTX (mode)
1516 && ! side_effects_p (op0))
1517 return op1;
1519 /* In IEEE floating point, x*1 is not equivalent to x for
1520 signalling NaNs. */
1521 if (!HONOR_SNANS (mode)
1522 && trueop1 == CONST1_RTX (mode))
1523 return op0;
1525 /* Convert multiply by constant power of two into shift unless
1526 we are still generating RTL. This test is a kludge. */
1527 if (GET_CODE (trueop1) == CONST_INT
1528 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1529 /* If the mode is larger than the host word size, and the
1530 uppermost bit is set, then this isn't a power of two due
1531 to implicit sign extension. */
1532 && (width <= HOST_BITS_PER_WIDE_INT
1533 || val != HOST_BITS_PER_WIDE_INT - 1))
1534 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1536 /* x*2 is x+x and x*(-1) is -x */
1537 if (GET_CODE (trueop1) == CONST_DOUBLE
1538 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1539 && GET_MODE (op0) == mode)
1541 REAL_VALUE_TYPE d;
1542 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1544 if (REAL_VALUES_EQUAL (d, dconst2))
1545 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1547 if (REAL_VALUES_EQUAL (d, dconstm1))
1548 return simplify_gen_unary (NEG, mode, op0, mode);
1551 /* Reassociate multiplication, but for floating point MULTs
1552 only when the user specifies unsafe math optimizations. */
1553 if (! FLOAT_MODE_P (mode)
1554 || flag_unsafe_math_optimizations)
1556 tem = simplify_associative_operation (code, mode, op0, op1);
1557 if (tem)
1558 return tem;
1560 break;
1562 case IOR:
1563 if (trueop1 == const0_rtx)
1564 return op0;
1565 if (GET_CODE (trueop1) == CONST_INT
1566 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1567 == GET_MODE_MASK (mode)))
1568 return op1;
1569 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1570 return op0;
1571 /* A | (~A) -> -1 */
1572 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1573 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1574 && ! side_effects_p (op0)
1575 && GET_MODE_CLASS (mode) != MODE_CC)
1576 return constm1_rtx;
1577 tem = simplify_associative_operation (code, mode, op0, op1);
1578 if (tem)
1579 return tem;
1580 break;
1582 case XOR:
1583 if (trueop1 == const0_rtx)
1584 return op0;
1585 if (GET_CODE (trueop1) == CONST_INT
1586 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1587 == GET_MODE_MASK (mode)))
1588 return simplify_gen_unary (NOT, mode, op0, mode);
1589 if (trueop0 == trueop1
1590 && ! side_effects_p (op0)
1591 && GET_MODE_CLASS (mode) != MODE_CC)
1592 return const0_rtx;
1594 /* Canonicalize XOR of the most significant bit to PLUS. */
1595 if ((GET_CODE (op1) == CONST_INT
1596 || GET_CODE (op1) == CONST_DOUBLE)
1597 && mode_signbit_p (mode, op1))
1598 return simplify_gen_binary (PLUS, mode, op0, op1);
1599 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1600 if ((GET_CODE (op1) == CONST_INT
1601 || GET_CODE (op1) == CONST_DOUBLE)
1602 && GET_CODE (op0) == PLUS
1603 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1604 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1605 && mode_signbit_p (mode, XEXP (op0, 1)))
1606 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1607 simplify_gen_binary (XOR, mode, op1,
1608 XEXP (op0, 1)));
1610 tem = simplify_associative_operation (code, mode, op0, op1);
1611 if (tem)
1612 return tem;
1613 break;
1615 case AND:
1616 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1617 return const0_rtx;
1618 /* If we are turning off bits already known off in OP0, we need
1619 not do an AND. */
1620 if (GET_CODE (trueop1) == CONST_INT
1621 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1622 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1623 return op0;
1624 if (trueop0 == trueop1 && ! side_effects_p (op0)
1625 && GET_MODE_CLASS (mode) != MODE_CC)
1626 return op0;
1627 /* A & (~A) -> 0 */
1628 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1629 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1630 && ! side_effects_p (op0)
1631 && GET_MODE_CLASS (mode) != MODE_CC)
1632 return const0_rtx;
1634 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1635 there are no nonzero bits of C outside of X's mode. */
1636 if ((GET_CODE (op0) == SIGN_EXTEND
1637 || GET_CODE (op0) == ZERO_EXTEND)
1638 && GET_CODE (trueop1) == CONST_INT
1639 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1640 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1641 & INTVAL (trueop1)) == 0)
1643 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1644 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1645 gen_int_mode (INTVAL (trueop1),
1646 imode));
1647 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1650 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1651 ((A & N) + B) & M -> (A + B) & M
1652 Similarly if (N & M) == 0,
1653 ((A | N) + B) & M -> (A + B) & M
1654 and for - instead of + and/or ^ instead of |. */
1655 if (GET_CODE (trueop1) == CONST_INT
1656 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1657 && ~INTVAL (trueop1)
1658 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1659 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1661 rtx pmop[2];
1662 int which;
1664 pmop[0] = XEXP (op0, 0);
1665 pmop[1] = XEXP (op0, 1);
1667 for (which = 0; which < 2; which++)
1669 tem = pmop[which];
1670 switch (GET_CODE (tem))
1672 case AND:
1673 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1674 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1675 == INTVAL (trueop1))
1676 pmop[which] = XEXP (tem, 0);
1677 break;
1678 case IOR:
1679 case XOR:
1680 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1681 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1682 pmop[which] = XEXP (tem, 0);
1683 break;
1684 default:
1685 break;
1689 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1691 tem = simplify_gen_binary (GET_CODE (op0), mode,
1692 pmop[0], pmop[1]);
1693 return simplify_gen_binary (code, mode, tem, op1);
1696 tem = simplify_associative_operation (code, mode, op0, op1);
1697 if (tem)
1698 return tem;
1699 break;
1701 case UDIV:
1702 /* 0/x is 0 (or x&0 if x has side-effects). */
1703 if (trueop0 == const0_rtx)
1704 return side_effects_p (op1)
1705 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1706 : const0_rtx;
1707 /* x/1 is x. */
1708 if (trueop1 == const1_rtx)
1710 /* Handle narrowing UDIV. */
1711 rtx x = gen_lowpart_common (mode, op0);
1712 if (x)
1713 return x;
1714 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1715 return gen_lowpart_SUBREG (mode, op0);
1716 return op0;
1718 /* Convert divide by power of two into shift. */
1719 if (GET_CODE (trueop1) == CONST_INT
1720 && (val = exact_log2 (INTVAL (trueop1))) > 0)
1721 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
1722 break;
1724 case DIV:
1725 /* Handle floating point and integers separately. */
1726 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1728 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1729 safe for modes with NaNs, since 0.0 / 0.0 will then be
1730 NaN rather than 0.0. Nor is it safe for modes with signed
1731 zeros, since dividing 0 by a negative number gives -0.0 */
1732 if (trueop0 == CONST0_RTX (mode)
1733 && !HONOR_NANS (mode)
1734 && !HONOR_SIGNED_ZEROS (mode)
1735 && ! side_effects_p (op1))
1736 return op0;
1737 /* x/1.0 is x. */
1738 if (trueop1 == CONST1_RTX (mode)
1739 && !HONOR_SNANS (mode))
1740 return op0;
1742 if (GET_CODE (trueop1) == CONST_DOUBLE
1743 && trueop1 != CONST0_RTX (mode))
1745 REAL_VALUE_TYPE d;
1746 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1748 /* x/-1.0 is -x. */
1749 if (REAL_VALUES_EQUAL (d, dconstm1)
1750 && !HONOR_SNANS (mode))
1751 return simplify_gen_unary (NEG, mode, op0, mode);
1753 /* Change FP division by a constant into multiplication.
1754 Only do this with -funsafe-math-optimizations. */
1755 if (flag_unsafe_math_optimizations
1756 && !REAL_VALUES_EQUAL (d, dconst0))
1758 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1759 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1760 return simplify_gen_binary (MULT, mode, op0, tem);
1764 else
1766 /* 0/x is 0 (or x&0 if x has side-effects). */
1767 if (trueop0 == const0_rtx)
1768 return side_effects_p (op1)
1769 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1770 : const0_rtx;
1771 /* x/1 is x. */
1772 if (trueop1 == const1_rtx)
1774 /* Handle narrowing DIV. */
1775 rtx x = gen_lowpart_common (mode, op0);
1776 if (x)
1777 return x;
1778 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1779 return gen_lowpart_SUBREG (mode, op0);
1780 return op0;
1782 /* x/-1 is -x. */
1783 if (trueop1 == constm1_rtx)
1785 rtx x = gen_lowpart_common (mode, op0);
1786 if (!x)
1787 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1788 ? gen_lowpart_SUBREG (mode, op0) : op0;
1789 return simplify_gen_unary (NEG, mode, x, mode);
1792 break;
1794 case UMOD:
1795 /* 0%x is 0 (or x&0 if x has side-effects). */
1796 if (trueop0 == const0_rtx)
1797 return side_effects_p (op1)
1798 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1799 : const0_rtx;
 1800 /* x%1 is 0 (or x&0 if x has side-effects). */
1801 if (trueop1 == const1_rtx)
1802 return side_effects_p (op0)
1803 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
1804 : const0_rtx;
1805 /* Implement modulus by power of two as AND. */
1806 if (GET_CODE (trueop1) == CONST_INT
1807 && exact_log2 (INTVAL (trueop1)) > 0)
1808 return simplify_gen_binary (AND, mode, op0,
1809 GEN_INT (INTVAL (op1) - 1));
1810 break;
1812 case MOD:
1813 /* 0%x is 0 (or x&0 if x has side-effects). */
1814 if (trueop0 == const0_rtx)
1815 return side_effects_p (op1)
1816 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1817 : const0_rtx;
1818 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
1819 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
1820 return side_effects_p (op0)
1821 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
1822 : const0_rtx;
1823 break;
1825 case ROTATERT:
1826 case ROTATE:
1827 case ASHIFTRT:
1828 /* Rotating ~0 always results in ~0. */
1829 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1830 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1831 && ! side_effects_p (op1))
1832 return op0;
1834 /* Fall through.... */
1836 case ASHIFT:
1837 case LSHIFTRT:
1838 if (trueop1 == const0_rtx)
1839 return op0;
1840 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1841 return op0;
1842 break;
1844 case SMIN:
1845 if (width <= HOST_BITS_PER_WIDE_INT
1846 && GET_CODE (trueop1) == CONST_INT
1847 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1848 && ! side_effects_p (op0))
1849 return op1;
1850 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1851 return op0;
1852 tem = simplify_associative_operation (code, mode, op0, op1);
1853 if (tem)
1854 return tem;
1855 break;
1857 case SMAX:
1858 if (width <= HOST_BITS_PER_WIDE_INT
1859 && GET_CODE (trueop1) == CONST_INT
1860 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1861 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1862 && ! side_effects_p (op0))
1863 return op1;
1864 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1865 return op0;
1866 tem = simplify_associative_operation (code, mode, op0, op1);
1867 if (tem)
1868 return tem;
1869 break;
1871 case UMIN:
1872 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1873 return op1;
1874 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1875 return op0;
1876 tem = simplify_associative_operation (code, mode, op0, op1);
1877 if (tem)
1878 return tem;
1879 break;
1881 case UMAX:
1882 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1883 return op1;
1884 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1885 return op0;
1886 tem = simplify_associative_operation (code, mode, op0, op1);
1887 if (tem)
1888 return tem;
1889 break;
1891 case SS_PLUS:
1892 case US_PLUS:
1893 case SS_MINUS:
1894 case US_MINUS:
1895 /* ??? There are simplifications that can be done. */
1896 return 0;
1898 case VEC_SELECT:
1899 if (!VECTOR_MODE_P (mode))
1901 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1902 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1903 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1904 gcc_assert (XVECLEN (trueop1, 0) == 1);
1905 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1907 if (GET_CODE (trueop0) == CONST_VECTOR)
1908 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1909 (trueop1, 0, 0)));
1911 else
1913 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1914 gcc_assert (GET_MODE_INNER (mode)
1915 == GET_MODE_INNER (GET_MODE (trueop0)));
1916 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1918 if (GET_CODE (trueop0) == CONST_VECTOR)
1920 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1921 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1922 rtvec v = rtvec_alloc (n_elts);
1923 unsigned int i;
1925 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1926 for (i = 0; i < n_elts; i++)
1928 rtx x = XVECEXP (trueop1, 0, i);
1930 gcc_assert (GET_CODE (x) == CONST_INT);
1931 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
1932 INTVAL (x));
1935 return gen_rtx_CONST_VECTOR (mode, v);
1938 return 0;
1939 case VEC_CONCAT:
1941 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1942 ? GET_MODE (trueop0)
1943 : GET_MODE_INNER (mode));
1944 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1945 ? GET_MODE (trueop1)
1946 : GET_MODE_INNER (mode));
1948 gcc_assert (VECTOR_MODE_P (mode));
1949 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1950 == GET_MODE_SIZE (mode));
1952 if (VECTOR_MODE_P (op0_mode))
1953 gcc_assert (GET_MODE_INNER (mode)
1954 == GET_MODE_INNER (op0_mode));
1955 else
1956 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
1958 if (VECTOR_MODE_P (op1_mode))
1959 gcc_assert (GET_MODE_INNER (mode)
1960 == GET_MODE_INNER (op1_mode));
1961 else
1962 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
1964 if ((GET_CODE (trueop0) == CONST_VECTOR
1965 || GET_CODE (trueop0) == CONST_INT
1966 || GET_CODE (trueop0) == CONST_DOUBLE)
1967 && (GET_CODE (trueop1) == CONST_VECTOR
1968 || GET_CODE (trueop1) == CONST_INT
1969 || GET_CODE (trueop1) == CONST_DOUBLE))
1971 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1972 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1973 rtvec v = rtvec_alloc (n_elts);
1974 unsigned int i;
1975 unsigned in_n_elts = 1;
1977 if (VECTOR_MODE_P (op0_mode))
1978 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1979 for (i = 0; i < n_elts; i++)
1981 if (i < in_n_elts)
1983 if (!VECTOR_MODE_P (op0_mode))
1984 RTVEC_ELT (v, i) = trueop0;
1985 else
1986 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1988 else
1990 if (!VECTOR_MODE_P (op1_mode))
1991 RTVEC_ELT (v, i) = trueop1;
1992 else
1993 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1994 i - in_n_elts);
1998 return gen_rtx_CONST_VECTOR (mode, v);
2001 return 0;
2003 default:
2004 gcc_unreachable ();
2007 return 0;
2011 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2012 rtx op0, rtx op1)
2014 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2015 HOST_WIDE_INT val;
2016 unsigned int width = GET_MODE_BITSIZE (mode);
2018 if (VECTOR_MODE_P (mode)
2019 && code != VEC_CONCAT
2020 && GET_CODE (op0) == CONST_VECTOR
2021 && GET_CODE (op1) == CONST_VECTOR)
2023 unsigned n_elts = GET_MODE_NUNITS (mode);
2024 enum machine_mode op0mode = GET_MODE (op0);
2025 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2026 enum machine_mode op1mode = GET_MODE (op1);
2027 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2028 rtvec v = rtvec_alloc (n_elts);
2029 unsigned int i;
2031 gcc_assert (op0_n_elts == n_elts);
2032 gcc_assert (op1_n_elts == n_elts);
2033 for (i = 0; i < n_elts; i++)
2035 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2036 CONST_VECTOR_ELT (op0, i),
2037 CONST_VECTOR_ELT (op1, i));
2038 if (!x)
2039 return 0;
2040 RTVEC_ELT (v, i) = x;
2043 return gen_rtx_CONST_VECTOR (mode, v);
2046 if (VECTOR_MODE_P (mode)
2047 && code == VEC_CONCAT
2048 && CONSTANT_P (op0) && CONSTANT_P (op1))
2050 unsigned n_elts = GET_MODE_NUNITS (mode);
2051 rtvec v = rtvec_alloc (n_elts);
2053 gcc_assert (n_elts >= 2);
2054 if (n_elts == 2)
2056 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2057 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2059 RTVEC_ELT (v, 0) = op0;
2060 RTVEC_ELT (v, 1) = op1;
2062 else
2064 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2065 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2066 unsigned i;
2068 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2069 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2070 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2072 for (i = 0; i < op0_n_elts; ++i)
2073 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2074 for (i = 0; i < op1_n_elts; ++i)
2075 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2078 return gen_rtx_CONST_VECTOR (mode, v);
2081 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2082 && GET_CODE (op0) == CONST_DOUBLE
2083 && GET_CODE (op1) == CONST_DOUBLE
2084 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2086 if (code == AND
2087 || code == IOR
2088 || code == XOR)
2090 long tmp0[4];
2091 long tmp1[4];
2092 REAL_VALUE_TYPE r;
2093 int i;
2095 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2096 GET_MODE (op0));
2097 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2098 GET_MODE (op1));
2099 for (i = 0; i < 4; i++)
2101 switch (code)
2103 case AND:
2104 tmp0[i] &= tmp1[i];
2105 break;
2106 case IOR:
2107 tmp0[i] |= tmp1[i];
2108 break;
2109 case XOR:
2110 tmp0[i] ^= tmp1[i];
2111 break;
2112 default:
2113 gcc_unreachable ();
2116 real_from_target (&r, tmp0, mode);
2117 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2119 else
2121 REAL_VALUE_TYPE f0, f1, value, result;
2122 bool inexact;
2124 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2125 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2126 real_convert (&f0, mode, &f0);
2127 real_convert (&f1, mode, &f1);
2129 if (HONOR_SNANS (mode)
2130 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2131 return 0;
2133 if (code == DIV
2134 && REAL_VALUES_EQUAL (f1, dconst0)
2135 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2136 return 0;
2138 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2139 && flag_trapping_math
2140 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2142 int s0 = REAL_VALUE_NEGATIVE (f0);
2143 int s1 = REAL_VALUE_NEGATIVE (f1);
2145 switch (code)
2147 case PLUS:
2148 /* Inf + -Inf = NaN plus exception. */
2149 if (s0 != s1)
2150 return 0;
2151 break;
2152 case MINUS:
2153 /* Inf - Inf = NaN plus exception. */
2154 if (s0 == s1)
2155 return 0;
2156 break;
2157 case DIV:
2158 /* Inf / Inf = NaN plus exception. */
2159 return 0;
2160 default:
2161 break;
2165 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2166 && flag_trapping_math
2167 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2168 || (REAL_VALUE_ISINF (f1)
2169 && REAL_VALUES_EQUAL (f0, dconst0))))
2170 /* Inf * 0 = NaN plus exception. */
2171 return 0;
2173 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2174 &f0, &f1);
2175 real_convert (&result, mode, &value);
2177 /* Don't constant fold this floating point operation if the
2178 result may depend upon the run-time rounding mode and
2179 flag_rounding_math is set, or if GCC's software emulation
2180 is unable to accurately represent the result. */
2182 if ((flag_rounding_math
2183 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2184 && !flag_unsafe_math_optimizations))
2185 && (inexact || !real_identical (&result, &value)))
2186 return NULL_RTX;
2188 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2192 /* We can fold some multi-word operations. */
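 /* For example, a PLUS of two double-word integer constants is folded by
    combining their low and high words with add_double below.  */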
2193 if (GET_MODE_CLASS (mode) == MODE_INT
2194 && width == HOST_BITS_PER_WIDE_INT * 2
2195 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2196 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2198 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2199 HOST_WIDE_INT h1, h2, hv, ht;
2201 if (GET_CODE (op0) == CONST_DOUBLE)
2202 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2203 else
2204 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2206 if (GET_CODE (op1) == CONST_DOUBLE)
2207 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2208 else
2209 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2211 switch (code)
2213 case MINUS:
2214 /* A - B == A + (-B). */
2215 neg_double (l2, h2, &lv, &hv);
2216 l2 = lv, h2 = hv;
2218 /* Fall through.... */
2220 case PLUS:
2221 add_double (l1, h1, l2, h2, &lv, &hv);
2222 break;
2224 case MULT:
2225 mul_double (l1, h1, l2, h2, &lv, &hv);
2226 break;
2228 case DIV:
2229 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2230 &lv, &hv, &lt, &ht))
2231 return 0;
2232 break;
2234 case MOD:
2235 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2236 &lt, &ht, &lv, &hv))
2237 return 0;
2238 break;
2240 case UDIV:
2241 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2242 &lv, &hv, &lt, &ht))
2243 return 0;
2244 break;
2246 case UMOD:
2247 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2248 &lt, &ht, &lv, &hv))
2249 return 0;
2250 break;
2252 case AND:
2253 lv = l1 & l2, hv = h1 & h2;
2254 break;
2256 case IOR:
2257 lv = l1 | l2, hv = h1 | h2;
2258 break;
2260 case XOR:
2261 lv = l1 ^ l2, hv = h1 ^ h2;
2262 break;
2264 case SMIN:
2265 if (h1 < h2
2266 || (h1 == h2
2267 && ((unsigned HOST_WIDE_INT) l1
2268 < (unsigned HOST_WIDE_INT) l2)))
2269 lv = l1, hv = h1;
2270 else
2271 lv = l2, hv = h2;
2272 break;
2274 case SMAX:
2275 if (h1 > h2
2276 || (h1 == h2
2277 && ((unsigned HOST_WIDE_INT) l1
2278 > (unsigned HOST_WIDE_INT) l2)))
2279 lv = l1, hv = h1;
2280 else
2281 lv = l2, hv = h2;
2282 break;
2284 case UMIN:
2285 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2286 || (h1 == h2
2287 && ((unsigned HOST_WIDE_INT) l1
2288 < (unsigned HOST_WIDE_INT) l2)))
2289 lv = l1, hv = h1;
2290 else
2291 lv = l2, hv = h2;
2292 break;
2294 case UMAX:
2295 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2296 || (h1 == h2
2297 && ((unsigned HOST_WIDE_INT) l1
2298 > (unsigned HOST_WIDE_INT) l2)))
2299 lv = l1, hv = h1;
2300 else
2301 lv = l2, hv = h2;
2302 break;
2304 case LSHIFTRT: case ASHIFTRT:
2305 case ASHIFT:
2306 case ROTATE: case ROTATERT:
2307 if (SHIFT_COUNT_TRUNCATED)
2308 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2310 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2311 return 0;
2313 if (code == LSHIFTRT || code == ASHIFTRT)
2314 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2315 code == ASHIFTRT);
2316 else if (code == ASHIFT)
2317 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2318 else if (code == ROTATE)
2319 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2320 else /* code == ROTATERT */
2321 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2322 break;
2324 default:
2325 return 0;
2328 return immed_double_const (lv, hv, mode);
2331 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2332 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2334 /* Get the integer argument values in two forms:
2335 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
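 /* For instance, in QImode the bit pattern 0xff gives ARG0 == 255 but
    ARG0S == -1.  */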
2337 arg0 = INTVAL (op0);
2338 arg1 = INTVAL (op1);
2340 if (width < HOST_BITS_PER_WIDE_INT)
2342 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2343 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2345 arg0s = arg0;
2346 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2347 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2349 arg1s = arg1;
2350 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2351 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2353 else
2355 arg0s = arg0;
2356 arg1s = arg1;
2359 /* Compute the value of the arithmetic. */
2361 switch (code)
2363 case PLUS:
2364 val = arg0s + arg1s;
2365 break;
2367 case MINUS:
2368 val = arg0s - arg1s;
2369 break;
2371 case MULT:
2372 val = arg0s * arg1s;
2373 break;
2375 case DIV:
2376 if (arg1s == 0
2377 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2378 && arg1s == -1))
2379 return 0;
2380 val = arg0s / arg1s;
2381 break;
2383 case MOD:
2384 if (arg1s == 0
2385 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2386 && arg1s == -1))
2387 return 0;
2388 val = arg0s % arg1s;
2389 break;
2391 case UDIV:
2392 if (arg1 == 0
2393 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2394 && arg1s == -1))
2395 return 0;
2396 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2397 break;
2399 case UMOD:
2400 if (arg1 == 0
2401 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2402 && arg1s == -1))
2403 return 0;
2404 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2405 break;
2407 case AND:
2408 val = arg0 & arg1;
2409 break;
2411 case IOR:
2412 val = arg0 | arg1;
2413 break;
2415 case XOR:
2416 val = arg0 ^ arg1;
2417 break;
2419 case LSHIFTRT:
2420 case ASHIFT:
2421 case ASHIFTRT:
2422 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2423 the value is in range. We can't return any old value for
2424 out-of-range arguments because either the middle-end (via
2425 shift_truncation_mask) or the back-end might be relying on
2426 target-specific knowledge. Nor can we rely on
2427 shift_truncation_mask, since the shift might not be part of an
2428 ashlM3, lshrM3 or ashrM3 instruction. */
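 /* For instance, if SHIFT_COUNT_TRUNCATED holds and the mode is 32 bits
    wide, a shift count of 33 is reduced to 1; otherwise an out-of-range
    shift is simply not folded here.  */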
2429 if (SHIFT_COUNT_TRUNCATED)
2430 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2431 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2432 return 0;
2434 val = (code == ASHIFT
2435 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2436 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2438 /* Sign-extend the result for arithmetic right shifts. */
2439 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2440 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2441 break;
2443 case ROTATERT:
2444 if (arg1 < 0)
2445 return 0;
2447 arg1 %= width;
2448 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2449 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2450 break;
2452 case ROTATE:
2453 if (arg1 < 0)
2454 return 0;
2456 arg1 %= width;
2457 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2458 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2459 break;
2461 case COMPARE:
2462 /* Do nothing here. */
2463 return 0;
2465 case SMIN:
2466 val = arg0s <= arg1s ? arg0s : arg1s;
2467 break;
2469 case UMIN:
2470 val = ((unsigned HOST_WIDE_INT) arg0
2471 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2472 break;
2474 case SMAX:
2475 val = arg0s > arg1s ? arg0s : arg1s;
2476 break;
2478 case UMAX:
2479 val = ((unsigned HOST_WIDE_INT) arg0
2480 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2481 break;
2483 case SS_PLUS:
2484 case US_PLUS:
2485 case SS_MINUS:
2486 case US_MINUS:
2487 /* ??? There are simplifications that can be done. */
2488 return 0;
2490 default:
2491 gcc_unreachable ();
2494 val = trunc_int_for_mode (val, mode);
2495 return GEN_INT (val);
2498 return NULL_RTX;
2503 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2504 PLUS or MINUS.
2506 Rather than test for specific cases, we do this by a brute-force method
2507 and do all possible simplifications until no more changes occur. Then
2508 we rebuild the operation.
2510 If FORCE is true, then always generate the rtx. This is used to
2511 canonicalize stuff emitted from simplify_gen_binary. Note that this
2512 can still fail if the rtx is too complex. It won't fail just because
2513 the result is not 'simpler' than the input, however. */
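 /* For example, (plus (plus (reg A) (reg B)) (neg (reg A))) is flattened
    into the operand array, the A and -A entries cancel against each other,
    and the remaining operands are recombined, leaving just (reg B).  */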
2515 struct simplify_plus_minus_op_data
2517 rtx op;
2518 int neg;
2521 static int
2522 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2524 const struct simplify_plus_minus_op_data *d1 = p1;
2525 const struct simplify_plus_minus_op_data *d2 = p2;
2527 return (commutative_operand_precedence (d2->op)
2528 - commutative_operand_precedence (d1->op));
2531 static rtx
2532 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2533 rtx op1, int force)
2535 struct simplify_plus_minus_op_data ops[8];
2536 rtx result, tem;
2537 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2538 int first, changed;
2539 int i, j;
2541 memset (ops, 0, sizeof ops);
2543 /* Set up the two operands and then expand them until nothing has been
2544 changed. If we run out of room in our array, give up; this should
2545 almost never happen. */
2547 ops[0].op = op0;
2548 ops[0].neg = 0;
2549 ops[1].op = op1;
2550 ops[1].neg = (code == MINUS);
2554 changed = 0;
2556 for (i = 0; i < n_ops; i++)
2558 rtx this_op = ops[i].op;
2559 int this_neg = ops[i].neg;
2560 enum rtx_code this_code = GET_CODE (this_op);
2562 switch (this_code)
2564 case PLUS:
2565 case MINUS:
2566 if (n_ops == 7)
2567 return NULL_RTX;
2569 ops[n_ops].op = XEXP (this_op, 1);
2570 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2571 n_ops++;
2573 ops[i].op = XEXP (this_op, 0);
2574 input_ops++;
2575 changed = 1;
2576 break;
2578 case NEG:
2579 ops[i].op = XEXP (this_op, 0);
2580 ops[i].neg = ! this_neg;
2581 changed = 1;
2582 break;
2584 case CONST:
2585 if (n_ops < 7
2586 && GET_CODE (XEXP (this_op, 0)) == PLUS
2587 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2588 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2590 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2591 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2592 ops[n_ops].neg = this_neg;
2593 n_ops++;
2594 input_consts++;
2595 changed = 1;
2597 break;
2599 case NOT:
2600 /* ~a -> (-a - 1) */
2601 if (n_ops != 7)
2603 ops[n_ops].op = constm1_rtx;
2604 ops[n_ops++].neg = this_neg;
2605 ops[i].op = XEXP (this_op, 0);
2606 ops[i].neg = !this_neg;
2607 changed = 1;
2609 break;
2611 case CONST_INT:
2612 if (this_neg)
2614 ops[i].op = neg_const_int (mode, this_op);
2615 ops[i].neg = 0;
2616 changed = 1;
2618 break;
2620 default:
2621 break;
2625 while (changed);
2627 /* If we only have two operands, we can't do anything. */
2628 if (n_ops <= 2 && !force)
2629 return NULL_RTX;
2631 /* Count the number of CONSTs we didn't split above. */
2632 for (i = 0; i < n_ops; i++)
2633 if (GET_CODE (ops[i].op) == CONST)
2634 input_consts++;
2636 /* Now simplify each pair of operands until nothing changes. The first
2637 time through just simplify constants against each other. */
2639 first = 1;
2642 changed = first;
2644 for (i = 0; i < n_ops - 1; i++)
2645 for (j = i + 1; j < n_ops; j++)
2647 rtx lhs = ops[i].op, rhs = ops[j].op;
2648 int lneg = ops[i].neg, rneg = ops[j].neg;
2650 if (lhs != 0 && rhs != 0
2651 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2653 enum rtx_code ncode = PLUS;
2655 if (lneg != rneg)
2657 ncode = MINUS;
2658 if (lneg)
2659 tem = lhs, lhs = rhs, rhs = tem;
2661 else if (swap_commutative_operands_p (lhs, rhs))
2662 tem = lhs, lhs = rhs, rhs = tem;
2664 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2666 /* Reject "simplifications" that just wrap the two
2667 arguments in a CONST. Failure to do so can result
2668 in infinite recursion with simplify_binary_operation
2669 when it calls us to simplify CONST operations. */
2670 if (tem
2671 && ! (GET_CODE (tem) == CONST
2672 && GET_CODE (XEXP (tem, 0)) == ncode
2673 && XEXP (XEXP (tem, 0), 0) == lhs
2674 && XEXP (XEXP (tem, 0), 1) == rhs)
2675 /* Don't allow -x + -1 -> ~x simplifications in the
2676 first pass. This allows us the chance to combine
2677 the -1 with other constants. */
2678 && ! (first
2679 && GET_CODE (tem) == NOT
2680 && XEXP (tem, 0) == rhs))
2682 lneg &= rneg;
2683 if (GET_CODE (tem) == NEG)
2684 tem = XEXP (tem, 0), lneg = !lneg;
2685 if (GET_CODE (tem) == CONST_INT && lneg)
2686 tem = neg_const_int (mode, tem), lneg = 0;
2688 ops[i].op = tem;
2689 ops[i].neg = lneg;
2690 ops[j].op = NULL_RTX;
2691 changed = 1;
2696 first = 0;
2698 while (changed);
2700 /* Pack all the operands to the lower-numbered entries. */
2701 for (i = 0, j = 0; j < n_ops; j++)
2702 if (ops[j].op)
2703 ops[i++] = ops[j];
2704 n_ops = i;
2706 /* Sort the operations based on swap_commutative_operands_p. */
2707 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2709 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2710 if (n_ops == 2
2711 && GET_CODE (ops[1].op) == CONST_INT
2712 && CONSTANT_P (ops[0].op)
2713 && ops[0].neg)
2714 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2716 /* We suppressed creation of trivial CONST expressions in the
2717 combination loop to avoid recursion. Create one manually now.
2718 The combination loop should have ensured that there is exactly
2719 one CONST_INT, and the sort will have ensured that it is last
2720 in the array and that any other constant will be next-to-last. */
2722 if (n_ops > 1
2723 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2724 && CONSTANT_P (ops[n_ops - 2].op))
2726 rtx value = ops[n_ops - 1].op;
2727 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2728 value = neg_const_int (mode, value);
2729 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2730 n_ops--;
2733 /* Count the number of CONSTs that we generated. */
2734 n_consts = 0;
2735 for (i = 0; i < n_ops; i++)
2736 if (GET_CODE (ops[i].op) == CONST)
2737 n_consts++;
2739 /* Give up if we didn't reduce the number of operands we had. Make
2740 sure we count a CONST as two operands. If we have the same
2741 number of operands, but have made more CONSTs than before, this
2742 is also an improvement, so accept it. */
2743 if (!force
2744 && (n_ops + n_consts > input_ops
2745 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2746 return NULL_RTX;
2748 /* Put a non-negated operand first, if possible. */
2750 for (i = 0; i < n_ops && ops[i].neg; i++)
2751 continue;
2752 if (i == n_ops)
2753 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2754 else if (i != 0)
2756 tem = ops[0].op;
2757 ops[0] = ops[i];
2758 ops[i].op = tem;
2759 ops[i].neg = 1;
2762 /* Now make the result by performing the requested operations. */
2763 result = ops[0].op;
2764 for (i = 1; i < n_ops; i++)
2765 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2766 mode, result, ops[i].op);
2768 return result;
2771 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2772 static bool
2773 plus_minus_operand_p (rtx x)
2775 return GET_CODE (x) == PLUS
2776 || GET_CODE (x) == MINUS
2777 || (GET_CODE (x) == CONST
2778 && GET_CODE (XEXP (x, 0)) == PLUS
2779 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2780 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2783 /* Like simplify_binary_operation except used for relational operators.
2784 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2785 not both be VOIDmode as well.
2787 CMP_MODE specifies the mode in which the comparison is done, so it is
2788 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2789 the operands or, if both are VOIDmode, the operands are compared in
2790 "infinite precision". */
2792 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2793 enum machine_mode cmp_mode, rtx op0, rtx op1)
2795 rtx tem, trueop0, trueop1;
2797 if (cmp_mode == VOIDmode)
2798 cmp_mode = GET_MODE (op0);
2799 if (cmp_mode == VOIDmode)
2800 cmp_mode = GET_MODE (op1);
2802 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2803 if (tem)
2805 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2807 if (tem == const0_rtx)
2808 return CONST0_RTX (mode);
2809 #ifdef FLOAT_STORE_FLAG_VALUE
2811 REAL_VALUE_TYPE val;
2812 val = FLOAT_STORE_FLAG_VALUE (mode);
2813 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2815 #else
2816 return NULL_RTX;
2817 #endif
2819 if (VECTOR_MODE_P (mode))
2821 if (tem == const0_rtx)
2822 return CONST0_RTX (mode);
2823 #ifdef VECTOR_STORE_FLAG_VALUE
2825 int i, units;
2826 rtvec v;
2828 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2829 if (val == NULL_RTX)
2830 return NULL_RTX;
2831 if (val == const1_rtx)
2832 return CONST1_RTX (mode);
2834 units = GET_MODE_NUNITS (mode);
2835 v = rtvec_alloc (units);
2836 for (i = 0; i < units; i++)
2837 RTVEC_ELT (v, i) = val;
2838 return gen_rtx_raw_CONST_VECTOR (mode, v);
2840 #else
2841 return NULL_RTX;
2842 #endif
2845 return tem;
2848 /* For the following tests, ensure const0_rtx is op1. */
2849 if (swap_commutative_operands_p (op0, op1)
2850 || (op0 == const0_rtx && op1 != const0_rtx))
2851 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2853 /* If op0 is a compare, extract the comparison arguments from it. */
2854 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2855 return simplify_relational_operation (code, mode, VOIDmode,
2856 XEXP (op0, 0), XEXP (op0, 1));
2858 if (mode == VOIDmode
2859 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2860 || CC0_P (op0))
2861 return NULL_RTX;
2863 trueop0 = avoid_constant_pool_reference (op0);
2864 trueop1 = avoid_constant_pool_reference (op1);
2865 return simplify_relational_operation_1 (code, mode, cmp_mode,
2866 trueop0, trueop1);
2869 /* This part of simplify_relational_operation is only used when CMP_MODE
2870 is not in class MODE_CC (i.e. it is a real comparison).
2872 MODE is the mode of the result, while CMP_MODE specifies the mode in which
2873 the comparison is done, so it is the mode of the operands. */
2875 static rtx
2876 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2877 enum machine_mode cmp_mode, rtx op0, rtx op1)
2879 enum rtx_code op0code = GET_CODE (op0);
2881 if (GET_CODE (op1) == CONST_INT)
2883 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2885 /* If op0 is a comparison, extract the comparison arguments from it. */
2886 if (code == NE)
2888 if (GET_MODE (op0) == cmp_mode)
2889 return simplify_rtx (op0);
2890 else
2891 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2892 XEXP (op0, 0), XEXP (op0, 1));
2894 else if (code == EQ)
2896 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2897 if (new_code != UNKNOWN)
2898 return simplify_gen_relational (new_code, mode, VOIDmode,
2899 XEXP (op0, 0), XEXP (op0, 1));
2904 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
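 /* For instance, (eq (plus x (const_int 3)) (const_int 7)) becomes
    (eq x (const_int 4)).  */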
2905 if ((code == EQ || code == NE)
2906 && (op0code == PLUS || op0code == MINUS)
2907 && CONSTANT_P (op1)
2908 && CONSTANT_P (XEXP (op0, 1))
2909 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2911 rtx x = XEXP (op0, 0);
2912 rtx c = XEXP (op0, 1);
2914 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2915 cmp_mode, op1, c);
2916 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2919 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2920 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2921 if (code == NE
2922 && op1 == const0_rtx
2923 && GET_MODE_CLASS (mode) == MODE_INT
2924 && cmp_mode != VOIDmode
2925 /* ??? Work-around BImode bugs in the ia64 backend. */
2926 && mode != BImode
2927 && cmp_mode != BImode
2928 && nonzero_bits (op0, cmp_mode) == 1
2929 && STORE_FLAG_VALUE == 1)
2930 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2931 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2932 : lowpart_subreg (mode, op0, cmp_mode);
2934 return NULL_RTX;
2937 /* Check if the given comparison (done in the given MODE) is actually a
2938 tautology or a contradiction.
2939 If no simplification is possible, this function returns zero.
2940 Otherwise, it returns either const_true_rtx or const0_rtx. */
2943 simplify_const_relational_operation (enum rtx_code code,
2944 enum machine_mode mode,
2945 rtx op0, rtx op1)
2947 int equal, op0lt, op0ltu, op1lt, op1ltu;
2948 rtx tem;
2949 rtx trueop0;
2950 rtx trueop1;
2952 gcc_assert (mode != VOIDmode
2953 || (GET_MODE (op0) == VOIDmode
2954 && GET_MODE (op1) == VOIDmode));
2956 /* If op0 is a compare, extract the comparison arguments from it. */
2957 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2958 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2960 /* We can't simplify MODE_CC values since we don't know what the
2961 actual comparison is. */
2962 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2963 return 0;
2965 /* Make sure the constant is second. */
2966 if (swap_commutative_operands_p (op0, op1))
2968 tem = op0, op0 = op1, op1 = tem;
2969 code = swap_condition (code);
2972 trueop0 = avoid_constant_pool_reference (op0);
2973 trueop1 = avoid_constant_pool_reference (op1);
2975 /* For integer comparisons of A and B maybe we can simplify A - B and can
2976 then simplify a comparison of that with zero. If A and B are both either
2977 a register or a CONST_INT, this can't help; testing for these cases will
2978 prevent infinite recursion here and speed things up.
2980 If CODE is an unsigned comparison, then we can never do this optimization,
2981 because it gives an incorrect result if the subtraction wraps around zero.
2982 ANSI C defines unsigned operations such that they never overflow, and
2983 thus such cases cannot be ignored; but we cannot do it even for
2984 signed comparisons for languages such as Java, so test flag_wrapv. */
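 /* For example, when flag_wrapv is not set, (gt (plus X (const_int 4)) X)
    reduces to comparing (const_int 4) against zero, which is known to be
    true.  */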
2986 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2987 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2988 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2989 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2990 /* We cannot do this for == or != if tem is a nonzero address. */
2991 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2992 && code != GTU && code != GEU && code != LTU && code != LEU)
2993 return simplify_const_relational_operation (signed_condition (code),
2994 mode, tem, const0_rtx);
2996 if (flag_unsafe_math_optimizations && code == ORDERED)
2997 return const_true_rtx;
2999 if (flag_unsafe_math_optimizations && code == UNORDERED)
3000 return const0_rtx;
3002 /* For modes without NaNs, if the two operands are equal, we know the
3003 result except if they have side-effects. */
3004 if (! HONOR_NANS (GET_MODE (trueop0))
3005 && rtx_equal_p (trueop0, trueop1)
3006 && ! side_effects_p (trueop0))
3007 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3009 /* If the operands are floating-point constants, see if we can fold
3010 the result. */
3011 else if (GET_CODE (trueop0) == CONST_DOUBLE
3012 && GET_CODE (trueop1) == CONST_DOUBLE
3013 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
3015 REAL_VALUE_TYPE d0, d1;
3017 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3018 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3020 /* Comparisons are unordered iff at least one of the values is NaN. */
3021 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3022 switch (code)
3024 case UNEQ:
3025 case UNLT:
3026 case UNGT:
3027 case UNLE:
3028 case UNGE:
3029 case NE:
3030 case UNORDERED:
3031 return const_true_rtx;
3032 case EQ:
3033 case LT:
3034 case GT:
3035 case LE:
3036 case GE:
3037 case LTGT:
3038 case ORDERED:
3039 return const0_rtx;
3040 default:
3041 return 0;
3044 equal = REAL_VALUES_EQUAL (d0, d1);
3045 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3046 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3049 /* Otherwise, see if the operands are both integers. */
3050 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3051 && (GET_CODE (trueop0) == CONST_DOUBLE
3052 || GET_CODE (trueop0) == CONST_INT)
3053 && (GET_CODE (trueop1) == CONST_DOUBLE
3054 || GET_CODE (trueop1) == CONST_INT))
3056 int width = GET_MODE_BITSIZE (mode);
3057 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3058 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3060 /* Get the two words comprising each integer constant. */
3061 if (GET_CODE (trueop0) == CONST_DOUBLE)
3063 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3064 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3066 else
3068 l0u = l0s = INTVAL (trueop0);
3069 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3072 if (GET_CODE (trueop1) == CONST_DOUBLE)
3074 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3075 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3077 else
3079 l1u = l1s = INTVAL (trueop1);
3080 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3083 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3084 we have to sign or zero-extend the values. */
3085 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3087 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3088 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3090 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3091 l0s |= ((HOST_WIDE_INT) (-1) << width);
3093 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3094 l1s |= ((HOST_WIDE_INT) (-1) << width);
3096 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3097 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3099 equal = (h0u == h1u && l0u == l1u);
3100 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3101 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3102 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3103 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3106 /* Otherwise, there are some code-specific tests we can make. */
3107 else
3109 /* Optimize comparisons with upper and lower bounds. */
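 /* For instance, in QImode (leu X (const_int 255)) is always true and
    (ltu X (const_int 0)) is always false.  */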
3110 if (SCALAR_INT_MODE_P (mode)
3111 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3113 rtx mmin, mmax;
3114 int sign;
3116 if (code == GEU
3117 || code == LEU
3118 || code == GTU
3119 || code == LTU)
3120 sign = 0;
3121 else
3122 sign = 1;
3124 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3126 tem = NULL_RTX;
3127 switch (code)
3129 case GEU:
3130 case GE:
3131 /* x >= min is always true. */
3132 if (rtx_equal_p (trueop1, mmin))
3133 tem = const_true_rtx;
3134 else
3135 break;
3137 case LEU:
3138 case LE:
3139 /* x <= max is always true. */
3140 if (rtx_equal_p (trueop1, mmax))
3141 tem = const_true_rtx;
3142 break;
3144 case GTU:
3145 case GT:
3146 /* x > max is always false. */
3147 if (rtx_equal_p (trueop1, mmax))
3148 tem = const0_rtx;
3149 break;
3151 case LTU:
3152 case LT:
3153 /* x < min is always false. */
3154 if (rtx_equal_p (trueop1, mmin))
3155 tem = const0_rtx;
3156 break;
3158 default:
3159 break;
3161 if (tem == const0_rtx
3162 || tem == const_true_rtx)
3163 return tem;
3166 switch (code)
3168 case EQ:
3169 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3170 return const0_rtx;
3171 break;
3173 case NE:
3174 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3175 return const_true_rtx;
3176 break;
3178 case LT:
3179 /* Optimize abs(x) < 0.0. */
3180 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3182 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3183 : trueop0;
3184 if (GET_CODE (tem) == ABS)
3185 return const0_rtx;
3187 break;
3189 case GE:
3190 /* Optimize abs(x) >= 0.0. */
3191 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3193 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3194 : trueop0;
3195 if (GET_CODE (tem) == ABS)
3196 return const_true_rtx;
3198 break;
3200 case UNGE:
3201 /* Optimize ! (abs(x) < 0.0). */
3202 if (trueop1 == CONST0_RTX (mode))
3204 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3205 : trueop0;
3206 if (GET_CODE (tem) == ABS)
3207 return const_true_rtx;
3209 break;
3211 default:
3212 break;
3215 return 0;
3218 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3219 as appropriate. */
3220 switch (code)
3222 case EQ:
3223 case UNEQ:
3224 return equal ? const_true_rtx : const0_rtx;
3225 case NE:
3226 case LTGT:
3227 return ! equal ? const_true_rtx : const0_rtx;
3228 case LT:
3229 case UNLT:
3230 return op0lt ? const_true_rtx : const0_rtx;
3231 case GT:
3232 case UNGT:
3233 return op1lt ? const_true_rtx : const0_rtx;
3234 case LTU:
3235 return op0ltu ? const_true_rtx : const0_rtx;
3236 case GTU:
3237 return op1ltu ? const_true_rtx : const0_rtx;
3238 case LE:
3239 case UNLE:
3240 return equal || op0lt ? const_true_rtx : const0_rtx;
3241 case GE:
3242 case UNGE:
3243 return equal || op1lt ? const_true_rtx : const0_rtx;
3244 case LEU:
3245 return equal || op0ltu ? const_true_rtx : const0_rtx;
3246 case GEU:
3247 return equal || op1ltu ? const_true_rtx : const0_rtx;
3248 case ORDERED:
3249 return const_true_rtx;
3250 case UNORDERED:
3251 return const0_rtx;
3252 default:
3253 gcc_unreachable ();
3257 /* Simplify CODE, an operation with result mode MODE and three operands,
3258 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3259 a constant. Return 0 if no simplification is possible. */
3262 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3263 enum machine_mode op0_mode, rtx op0, rtx op1,
3264 rtx op2)
3266 unsigned int width = GET_MODE_BITSIZE (mode);
3268 /* VOIDmode means "infinite" precision. */
3269 if (width == 0)
3270 width = HOST_BITS_PER_WIDE_INT;
3272 switch (code)
3274 case SIGN_EXTRACT:
3275 case ZERO_EXTRACT:
3276 if (GET_CODE (op0) == CONST_INT
3277 && GET_CODE (op1) == CONST_INT
3278 && GET_CODE (op2) == CONST_INT
3279 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3280 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3282 /* Extracting a bit-field from a constant */
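 /* For instance, with little-endian bit numbering (BITS_BIG_ENDIAN false),
    (zero_extract:SI (const_int 0xf0) (const_int 4) (const_int 4)) folds
    to (const_int 0xf).  */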
3283 HOST_WIDE_INT val = INTVAL (op0);
3285 if (BITS_BIG_ENDIAN)
3286 val >>= (GET_MODE_BITSIZE (op0_mode)
3287 - INTVAL (op2) - INTVAL (op1));
3288 else
3289 val >>= INTVAL (op2);
3291 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3293 /* First zero-extend. */
3294 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3295 /* If desired, propagate sign bit. */
3296 if (code == SIGN_EXTRACT
3297 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3298 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3301 /* Clear the bits that don't belong in our mode,
3302 unless they and our sign bit are all one.
3303 So we get either a reasonable negative value or a reasonable
3304 unsigned value for this mode. */
3305 if (width < HOST_BITS_PER_WIDE_INT
3306 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3307 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3308 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3310 return gen_int_mode (val, mode);
3312 break;
3314 case IF_THEN_ELSE:
3315 if (GET_CODE (op0) == CONST_INT)
3316 return op0 != const0_rtx ? op1 : op2;
3318 /* Convert c ? a : a into "a". */
3319 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3320 return op1;
3322 /* Convert a != b ? a : b into "a". */
3323 if (GET_CODE (op0) == NE
3324 && ! side_effects_p (op0)
3325 && ! HONOR_NANS (mode)
3326 && ! HONOR_SIGNED_ZEROS (mode)
3327 && ((rtx_equal_p (XEXP (op0, 0), op1)
3328 && rtx_equal_p (XEXP (op0, 1), op2))
3329 || (rtx_equal_p (XEXP (op0, 0), op2)
3330 && rtx_equal_p (XEXP (op0, 1), op1))))
3331 return op1;
3333 /* Convert a == b ? a : b into "b". */
3334 if (GET_CODE (op0) == EQ
3335 && ! side_effects_p (op0)
3336 && ! HONOR_NANS (mode)
3337 && ! HONOR_SIGNED_ZEROS (mode)
3338 && ((rtx_equal_p (XEXP (op0, 0), op1)
3339 && rtx_equal_p (XEXP (op0, 1), op2))
3340 || (rtx_equal_p (XEXP (op0, 0), op2)
3341 && rtx_equal_p (XEXP (op0, 1), op1))))
3342 return op2;
3344 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3346 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3347 ? GET_MODE (XEXP (op0, 1))
3348 : GET_MODE (XEXP (op0, 0)));
3349 rtx temp;
3351 /* Look for happy constants in op1 and op2. */
3352 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3354 HOST_WIDE_INT t = INTVAL (op1);
3355 HOST_WIDE_INT f = INTVAL (op2);
3357 if (t == STORE_FLAG_VALUE && f == 0)
3358 code = GET_CODE (op0);
3359 else if (t == 0 && f == STORE_FLAG_VALUE)
3361 enum rtx_code tmp;
3362 tmp = reversed_comparison_code (op0, NULL_RTX);
3363 if (tmp == UNKNOWN)
3364 break;
3365 code = tmp;
3367 else
3368 break;
3370 return simplify_gen_relational (code, mode, cmp_mode,
3371 XEXP (op0, 0), XEXP (op0, 1));
3374 if (cmp_mode == VOIDmode)
3375 cmp_mode = op0_mode;
3376 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3377 cmp_mode, XEXP (op0, 0),
3378 XEXP (op0, 1));
3380 /* See if any simplifications were possible. */
3381 if (temp)
3383 if (GET_CODE (temp) == CONST_INT)
3384 return temp == const0_rtx ? op2 : op1;
3385 else if (temp)
3386 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3389 break;
3391 case VEC_MERGE:
3392 gcc_assert (GET_MODE (op0) == mode);
3393 gcc_assert (GET_MODE (op1) == mode);
3394 gcc_assert (VECTOR_MODE_P (mode));
3395 op2 = avoid_constant_pool_reference (op2);
3396 if (GET_CODE (op2) == CONST_INT)
3398 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3399 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3400 int mask = (1 << n_elts) - 1;
3402 if (!(INTVAL (op2) & mask))
3403 return op1;
3404 if ((INTVAL (op2) & mask) == mask)
3405 return op0;
3407 op0 = avoid_constant_pool_reference (op0);
3408 op1 = avoid_constant_pool_reference (op1);
3409 if (GET_CODE (op0) == CONST_VECTOR
3410 && GET_CODE (op1) == CONST_VECTOR)
3412 rtvec v = rtvec_alloc (n_elts);
3413 unsigned int i;
3415 for (i = 0; i < n_elts; i++)
3416 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3417 ? CONST_VECTOR_ELT (op0, i)
3418 : CONST_VECTOR_ELT (op1, i));
3419 return gen_rtx_CONST_VECTOR (mode, v);
3422 break;
3424 default:
3425 gcc_unreachable ();
3428 return 0;
3431 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3432 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3434 Works by unpacking OP into a collection of 8-bit values
3435 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3436 and then repacking them again for OUTERMODE. */
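 /* For example, on a little-endian target, (const_int 0x1234) viewed in
    SImode unpacks into the bytes 34 12 00 00, so a QImode subreg at byte 0
    repacks to (const_int 0x34).  */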
3438 static rtx
3439 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3440 enum machine_mode innermode, unsigned int byte)
3442 /* We support up to 512-bit values (for V8DFmode). */
3443 enum {
3444 max_bitsize = 512,
3445 value_bit = 8,
3446 value_mask = (1 << value_bit) - 1
3448 unsigned char value[max_bitsize / value_bit];
3449 int value_start;
3450 int i;
3451 int elem;
3453 int num_elem;
3454 rtx * elems;
3455 int elem_bitsize;
3456 rtx result_s;
3457 rtvec result_v = NULL;
3458 enum mode_class outer_class;
3459 enum machine_mode outer_submode;
3461 /* Some ports misuse CCmode. */
3462 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3463 return op;
3465 /* We have no way to represent a complex constant at the rtl level. */
3466 if (COMPLEX_MODE_P (outermode))
3467 return NULL_RTX;
3469 /* Unpack the value. */
3471 if (GET_CODE (op) == CONST_VECTOR)
3473 num_elem = CONST_VECTOR_NUNITS (op);
3474 elems = &CONST_VECTOR_ELT (op, 0);
3475 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3477 else
3479 num_elem = 1;
3480 elems = &op;
3481 elem_bitsize = max_bitsize;
3483 /* If this asserts, it is too complicated; reducing value_bit may help. */
3484 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3485 /* I don't know how to handle endianness of sub-units. */
3486 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3488 for (elem = 0; elem < num_elem; elem++)
3490 unsigned char * vp;
3491 rtx el = elems[elem];
3493 /* Vectors are kept in target memory order. (This is probably
3494 a mistake.) */
3496 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3497 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3498 / BITS_PER_UNIT);
3499 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3500 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3501 unsigned bytele = (subword_byte % UNITS_PER_WORD
3502 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3503 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3506 switch (GET_CODE (el))
3508 case CONST_INT:
3509 for (i = 0;
3510 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3511 i += value_bit)
3512 *vp++ = INTVAL (el) >> i;
3513 /* CONST_INTs are always logically sign-extended. */
3514 for (; i < elem_bitsize; i += value_bit)
3515 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3516 break;
3518 case CONST_DOUBLE:
3519 if (GET_MODE (el) == VOIDmode)
3521 /* If this triggers, someone should have generated a
3522 CONST_INT instead. */
3523 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3525 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3526 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3527 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3529 *vp++
3530 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3531 i += value_bit;
3533 /* It shouldn't matter what's done here, so fill it with
3534 zero. */
3535 for (; i < max_bitsize; i += value_bit)
3536 *vp++ = 0;
3538 else
3540 long tmp[max_bitsize / 32];
3541 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3543 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3544 gcc_assert (bitsize <= elem_bitsize);
3545 gcc_assert (bitsize % value_bit == 0);
3547 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3548 GET_MODE (el));
3550 /* real_to_target produces its result in words affected by
3551 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3552 and use WORDS_BIG_ENDIAN instead; see the documentation
3553 of SUBREG in rtl.texi. */
3554 for (i = 0; i < bitsize; i += value_bit)
3556 int ibase;
3557 if (WORDS_BIG_ENDIAN)
3558 ibase = bitsize - 1 - i;
3559 else
3560 ibase = i;
3561 *vp++ = tmp[ibase / 32] >> i % 32;
3564 /* It shouldn't matter what's done here, so fill it with
3565 zero. */
3566 for (; i < elem_bitsize; i += value_bit)
3567 *vp++ = 0;
3569 break;
3571 default:
3572 gcc_unreachable ();
3576 /* Now, pick the right byte to start with. */
3577 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3578 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3579 will already have offset 0. */
3580 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3582 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3583 - byte);
3584 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3585 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3586 byte = (subword_byte % UNITS_PER_WORD
3587 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3590 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3591 so if it's become negative it will instead be very large.) */
3592 gcc_assert (byte < GET_MODE_SIZE (innermode));
3594 /* Convert from bytes to chunks of size value_bit. */
3595 value_start = byte * (BITS_PER_UNIT / value_bit);
3597 /* Re-pack the value. */
3599 if (VECTOR_MODE_P (outermode))
3601 num_elem = GET_MODE_NUNITS (outermode);
3602 result_v = rtvec_alloc (num_elem);
3603 elems = &RTVEC_ELT (result_v, 0);
3604 outer_submode = GET_MODE_INNER (outermode);
3606 else
3608 num_elem = 1;
3609 elems = &result_s;
3610 outer_submode = outermode;
3613 outer_class = GET_MODE_CLASS (outer_submode);
3614 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3616 gcc_assert (elem_bitsize % value_bit == 0);
3617 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3619 for (elem = 0; elem < num_elem; elem++)
3621 unsigned char *vp;
3623 /* Vectors are stored in target memory order. (This is probably
3624 a mistake.) */
3626 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3627 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3628 / BITS_PER_UNIT);
3629 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3630 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3631 unsigned bytele = (subword_byte % UNITS_PER_WORD
3632 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3633 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3636 switch (outer_class)
3638 case MODE_INT:
3639 case MODE_PARTIAL_INT:
3641 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3643 for (i = 0;
3644 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3645 i += value_bit)
3646 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3647 for (; i < elem_bitsize; i += value_bit)
3648 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3649 << (i - HOST_BITS_PER_WIDE_INT));
3651 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3652 know why. */
3653 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3654 elems[elem] = gen_int_mode (lo, outer_submode);
3655 else
3656 elems[elem] = immed_double_const (lo, hi, outer_submode);
3658 break;
3660 case MODE_FLOAT:
3662 REAL_VALUE_TYPE r;
3663 long tmp[max_bitsize / 32];
3665 /* real_from_target wants its input in words affected by
3666 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3667 and use WORDS_BIG_ENDIAN instead; see the documentation
3668 of SUBREG in rtl.texi. */
3669 for (i = 0; i < max_bitsize / 32; i++)
3670 tmp[i] = 0;
3671 for (i = 0; i < elem_bitsize; i += value_bit)
3673 int ibase;
3674 if (WORDS_BIG_ENDIAN)
3675 ibase = elem_bitsize - 1 - i;
3676 else
3677 ibase = i;
3678 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3681 real_from_target (&r, tmp, outer_submode);
3682 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3684 break;
3686 default:
3687 gcc_unreachable ();
3690 if (VECTOR_MODE_P (outermode))
3691 return gen_rtx_CONST_VECTOR (outermode, result_v);
3692 else
3693 return result_s;
3696 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3697 Return 0 if no simplifications are possible. */
3699 simplify_subreg (enum machine_mode outermode, rtx op,
3700 enum machine_mode innermode, unsigned int byte)
3702 /* Little bit of sanity checking. */
3703 gcc_assert (innermode != VOIDmode);
3704 gcc_assert (outermode != VOIDmode);
3705 gcc_assert (innermode != BLKmode);
3706 gcc_assert (outermode != BLKmode);
3708 gcc_assert (GET_MODE (op) == innermode
3709 || GET_MODE (op) == VOIDmode);
3711 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3712 gcc_assert (byte < GET_MODE_SIZE (innermode));
3714 if (outermode == innermode && !byte)
3715 return op;
3717 if (GET_CODE (op) == CONST_INT
3718 || GET_CODE (op) == CONST_DOUBLE
3719 || GET_CODE (op) == CONST_VECTOR)
3720 return simplify_immed_subreg (outermode, op, innermode, byte);
3722 /* Changing mode twice with SUBREG => just change it once,
3723 or not at all if changing back to the op's starting mode. */
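 /* For instance, (subreg:QI (subreg:HI (reg:SI R) 0) 0) is rewritten as
    (subreg:QI (reg:SI R) 0) rather than nesting two SUBREGs.  */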
3724 if (GET_CODE (op) == SUBREG)
3726 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3727 int final_offset = byte + SUBREG_BYTE (op);
3728 rtx newx;
3730 if (outermode == innermostmode
3731 && byte == 0 && SUBREG_BYTE (op) == 0)
3732 return SUBREG_REG (op);
3734 /* The SUBREG_BYTE represents the offset, as if the value were stored
3735 in memory. An irritating exception is the paradoxical subreg, where
3736 we define SUBREG_BYTE to be 0; on big endian machines, this
3737 value should really be negative. For a moment, undo this exception. */
3738 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3740 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3741 if (WORDS_BIG_ENDIAN)
3742 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3743 if (BYTES_BIG_ENDIAN)
3744 final_offset += difference % UNITS_PER_WORD;
3746 if (SUBREG_BYTE (op) == 0
3747 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3749 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3750 if (WORDS_BIG_ENDIAN)
3751 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3752 if (BYTES_BIG_ENDIAN)
3753 final_offset += difference % UNITS_PER_WORD;
3756 /* See whether resulting subreg will be paradoxical. */
3757 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3759 /* In nonparadoxical subregs we can't handle negative offsets. */
3760 if (final_offset < 0)
3761 return NULL_RTX;
3762 /* Bail out in case resulting subreg would be incorrect. */
3763 if (final_offset % GET_MODE_SIZE (outermode)
3764 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3765 return NULL_RTX;
3767 else
3769 int offset = 0;
3770 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3772 /* In a paradoxical subreg, see if we are still looking at the lower part.
3773 If so, our SUBREG_BYTE will be 0. */
3774 if (WORDS_BIG_ENDIAN)
3775 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3776 if (BYTES_BIG_ENDIAN)
3777 offset += difference % UNITS_PER_WORD;
3778 if (offset == final_offset)
3779 final_offset = 0;
3780 else
3781 return NULL_RTX;
3784 /* Recurse for further possible simplifications. */
3785 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3786 final_offset);
3787 if (newx)
3788 return newx;
3789 if (validate_subreg (outermode, innermostmode,
3790 SUBREG_REG (op), final_offset))
3791 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3792 return NULL_RTX;
3795 /* SUBREG of a hard register => just change the register number
3796 and/or mode. If the hard register is not valid in that mode,
3797 suppress this simplification. If the hard register is the stack,
3798 frame, or argument pointer, leave this as a SUBREG. */
3800 if (REG_P (op)
3801 && REGNO (op) < FIRST_PSEUDO_REGISTER
3802 #ifdef CANNOT_CHANGE_MODE_CLASS
3803 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3804 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3805 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3806 #endif
3807 && ((reload_completed && !frame_pointer_needed)
3808 || (REGNO (op) != FRAME_POINTER_REGNUM
3809 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3810 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3811 #endif
3813 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3814 && REGNO (op) != ARG_POINTER_REGNUM
3815 #endif
3816 && REGNO (op) != STACK_POINTER_REGNUM
3817 && subreg_offset_representable_p (REGNO (op), innermode,
3818 byte, outermode))
3820 unsigned int regno = REGNO (op);
3821 unsigned int final_regno
3822 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3824 /* ??? We do allow it if the current REG is not valid for
3825 its mode. This is a kludge to work around how float/complex
3826 arguments are passed on 32-bit SPARC and should be fixed. */
3827 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3828 || ! HARD_REGNO_MODE_OK (regno, innermode))
3830 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3832 /* Propagate original regno. We don't have any way to specify
3833 the offset inside the original regno, so do so only for the lowpart.
3834 The information is used only by alias analysis, which cannot
3835 grok partial registers anyway. */
3837 if (subreg_lowpart_offset (outermode, innermode) == byte)
3838 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3839 return x;
3843 /* If we have a SUBREG of a register that we are replacing and we are
3844 replacing it with a MEM, make a new MEM and try replacing the
3845 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3846 or if we would be widening it. */
3848 if (MEM_P (op)
3849 && ! mode_dependent_address_p (XEXP (op, 0))
3850 /* Allow splitting of volatile memory references in case we don't
3851 have an instruction to move the whole thing. */
3852 && (! MEM_VOLATILE_P (op)
3853 || ! have_insn_for (SET, innermode))
3854 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3855 return adjust_address_nv (op, outermode, byte);
3857 /* Handle complex values represented as CONCAT
3858 of real and imaginary part. */
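 /* For instance, (subreg:SF (concat:SC R I) 0) simplifies to R, the real
    part, while an offset past the real part selects from I instead.  */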
3859 if (GET_CODE (op) == CONCAT)
3861 unsigned int inner_size, final_offset;
3862 rtx part, res;
3864 inner_size = GET_MODE_UNIT_SIZE (innermode);
3865 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3866 final_offset = byte % inner_size;
3867 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3868 return NULL_RTX;
3870 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3871 if (res)
3872 return res;
3873 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3874 return gen_rtx_SUBREG (outermode, part, final_offset);
3875 return NULL_RTX;
3878 /* Optimize SUBREG truncations of zero and sign extended values. */
3879 if ((GET_CODE (op) == ZERO_EXTEND
3880 || GET_CODE (op) == SIGN_EXTEND)
3881 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3883 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3885 /* If we're requesting the lowpart of a zero or sign extension,
3886 there are three possibilities. If the outermode is the same
3887 as the origmode, we can omit both the extension and the subreg.
3888 If the outermode is not larger than the origmode, we can apply
3889 the truncation without the extension. Finally, if the outermode
3890 is larger than the origmode, but both are integer modes, we
3891 can just extend to the appropriate mode. */
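 /* For instance, the lowpart QImode SUBREG of (zero_extend:SI (reg:QI R))
    is just (reg:QI R).  */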
3892 if (bitpos == 0)
3894 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3895 if (outermode == origmode)
3896 return XEXP (op, 0);
3897 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3898 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3899 subreg_lowpart_offset (outermode,
3900 origmode));
3901 if (SCALAR_INT_MODE_P (outermode))
3902 return simplify_gen_unary (GET_CODE (op), outermode,
3903 XEXP (op, 0), origmode);
3906 /* A SUBREG resulting from a zero extension may fold to zero if
3907 it extracts higher bits than the ZERO_EXTEND's source bits. */
3908 if (GET_CODE (op) == ZERO_EXTEND
3909 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3910 return CONST0_RTX (outermode);
3913 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
3914 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3915 the outer subreg is effectively a truncation to the original mode. */
3916 if ((GET_CODE (op) == LSHIFTRT
3917 || GET_CODE (op) == ASHIFTRT)
3918 && SCALAR_INT_MODE_P (outermode)
3919 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
3920 to avoid the possibility that an outer LSHIFTRT shifts by more
3921 than the sign extension's sign_bit_copies and introduces zeros
3922 into the high bits of the result. */
3923 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3924 && GET_CODE (XEXP (op, 1)) == CONST_INT
3925 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3926 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3927 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3928 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3929 return simplify_gen_binary (ASHIFTRT, outermode,
3930 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
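/* For instance, on a little-endian target
   (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI 100)) (const_int 3)) 0)
   becomes (ashiftrt:QI (reg:QI 100) (const_int 3)); the register number
   and shift count are illustrative.  */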
3932 /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
3933 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3934 the outer subreg is effectively a truncation to the original mode. */
3935 if ((GET_CODE (op) == LSHIFTRT
3936 || GET_CODE (op) == ASHIFTRT)
3937 && SCALAR_INT_MODE_P (outermode)
3938 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3939 && GET_CODE (XEXP (op, 1)) == CONST_INT
3940 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3941 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3942 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3943 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3944 return simplify_gen_binary (LSHIFTRT, outermode,
3945 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
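/* For instance, on a little-endian target
   (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI 100)) (const_int 2)) 0)
   becomes (lshiftrt:QI (reg:QI 100) (const_int 2)).  */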
3947 /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
3948 to (ashift:QI (x:QI) C), where C is a suitable small constant and
3949 the outer subreg is effectively a truncation to the original mode. */
3950 if (GET_CODE (op) == ASHIFT
3951 && SCALAR_INT_MODE_P (outermode)
3952 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3953 && GET_CODE (XEXP (op, 1)) == CONST_INT
3954 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3955 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3956 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3957 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3958 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3959 return simplify_gen_binary (ASHIFT, outermode,
3960 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
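/* For instance, on a little-endian target
   (subreg:QI (ashift:SI (zero_extend:SI (reg:QI 100)) (const_int 1)) 0)
   becomes (ashift:QI (reg:QI 100) (const_int 1)); a sign_extend inside
   the shift is treated the same way.  */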
3962 return NULL_RTX;
3963 }
3965 /* Make a SUBREG operation or equivalent if it folds. */
3967 rtx
3968 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3969 enum machine_mode innermode, unsigned int byte)
3970 {
3971 rtx newx;
3973 newx = simplify_subreg (outermode, op, innermode, byte);
3974 if (newx)
3975 return newx;
3977 if (GET_CODE (op) == SUBREG
3978 || GET_CODE (op) == CONCAT
3979 || GET_MODE (op) == VOIDmode)
3980 return NULL_RTX;
3982 if (validate_subreg (outermode, innermode, op, byte))
3983 return gen_rtx_SUBREG (outermode, op, byte);
3985 return NULL_RTX;
3986 }
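/* As a usage sketch, simplify_gen_subreg (QImode, x, SImode, 0) first
   tries simplify_subreg; if that finds nothing and X is, say, a pseudo
   register, it falls back to (subreg:QI x 0) when such a SUBREG is
   valid, and returns NULL_RTX otherwise.  */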
3988 /* Simplify X, an rtx expression.
3990 Return the simplified expression or NULL if no simplifications
3991 were possible.
3993 This is the preferred entry point into the simplification routines;
3994 however, we still allow passes to call the more specific routines.
3996 Right now GCC has three (yes, three) major bodies of RTL simplification
3997 code that need to be unified.
3999 1. fold_rtx in cse.c. This code uses various CSE specific
4000 information to aid in RTL simplification.
4002 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4003 it uses combine specific information to aid in RTL
4004 simplification.
4006 3. The routines in this file.
4009 Long term we want to only have one body of simplification code; to
4010 get to that state I recommend the following steps:
4012 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4013 that do not depend on pass-specific state into these routines.
4015 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4016 use this routine whenever possible.
4018 3. Allow for pass dependent state to be provided to these
4019 routines and add simplifications based on the pass dependent
4020 state. Remove code from cse.c & combine.c that becomes
4021 redundant/dead.
4023 It will take time, but ultimately the compiler will be easier to
4024 maintain and improve. It's totally silly that when we add a
4025 simplification it needs to be added to 4 places (3 for RTL
4026 simplification and 1 for tree simplification). */
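/* For example, simplify_rtx applied to (not:SI (const_int 0)) folds to
   (const_int -1) through simplify_unary_operation, whereas an expression
   that admits no simplification yields NULL.  */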
4028 rtx
4029 simplify_rtx (rtx x)
4030 {
4031 enum rtx_code code = GET_CODE (x);
4032 enum machine_mode mode = GET_MODE (x);
4034 switch (GET_RTX_CLASS (code))
4035 {
4036 case RTX_UNARY:
4037 return simplify_unary_operation (code, mode,
4038 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4039 case RTX_COMM_ARITH:
4040 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4041 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4043 /* Fall through.... */
4045 case RTX_BIN_ARITH:
4046 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4048 case RTX_TERNARY:
4049 case RTX_BITFIELD_OPS:
4050 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4051 XEXP (x, 0), XEXP (x, 1),
4052 XEXP (x, 2));
4054 case RTX_COMPARE:
4055 case RTX_COMM_COMPARE:
4056 return simplify_relational_operation (code, mode,
4057 ((GET_MODE (XEXP (x, 0))
4058 != VOIDmode)
4059 ? GET_MODE (XEXP (x, 0))
4060 : GET_MODE (XEXP (x, 1))),
4061 XEXP (x, 0),
4062 XEXP (x, 1));
4064 case RTX_EXTRA:
4065 if (code == SUBREG)
4066 return simplify_gen_subreg (mode, SUBREG_REG (x),
4067 GET_MODE (SUBREG_REG (x)),
4068 SUBREG_BYTE (x));
4069 break;
4071 case RTX_OBJ:
4072 if (code == LO_SUM)
4073 {
4074 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4075 if (GET_CODE (XEXP (x, 0)) == HIGH
4076 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4077 return XEXP (x, 1);
4078 }
4079 break;
4081 default:
4082 break;
4083 }
4084 return NULL;