/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
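/* For example, HWI_SIGN_EXTEND (-5) yields the all-ones high word
   (HOST_WIDE_INT) -1, while HWI_SIGN_EXTEND (7) yields 0.  */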
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
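/* Note that the truncation matters: in QImode, negating
   (const_int -128) yields (const_int -128) again, since +128 does not
   fit in the mode and gen_int_mode wraps the value.  */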
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
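/* For example, in SImode the predicate holds for (const_int 0x80000000)
   (stored sign-extended on a 64-bit host; the masking above discards
   the extension) and fails for any other power of two such as
   (const_int 0x40000000).  */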
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
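/* So, for instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   hands back X itself, while a non-foldable pair such as two distinct
   pseudo registers simply yields the (plus ...) rtx.  */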
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
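/* E.g. an SFmode MEM of a SYMBOL_REF into the constant pool that was
   created for the constant 1.0 comes back as the CONST_DOUBLE 1.0, so
   callers can fold through pool references just like ordinary
   constants.  */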
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
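/* For example, replacing (reg 60) with (const_int 4) in
   (plus (reg 60) (const_int 3)) does not merely substitute: the
   rebuilt expression is re-simplified, so the result is (const_int 7).  */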
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == -1
          && COMPARISON_P (op)
          && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
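/* A few concrete instances of the above: (not (not x)) folds to x,
   (neg (neg x)) folds to x, and in SImode (neg (ashiftrt x 31))
   becomes (lshiftrt x 31), extracting the sign bit as 0 or 1.  */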
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
                 - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
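/* Thus simplify_const_unary_operation (NEG, SImode, GEN_INT (5), SImode)
   yields (const_int -5), and FIX applied to the SFmode constant 2.75
   yields (const_int 2), truncating towards zero.  */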
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
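/* For instance, (plus (plus x 3) 9) is handled here: the two constants
   meet through the "(a op b) op c -> a op (b op c)" step and the
   result is (plus x 12).  */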
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
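/* Because of the swap above, a call with the constant first, such as
   (PLUS, SImode, (const_int 4), (reg)), is canonicalized and then
   treated exactly like (plus (reg) (const_int 4)).  */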
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (! FLOAT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            coeff0 = -1, lhs = XEXP (lhs, 0);
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            coeff1 = -1, rhs = XEXP (rhs, 0);
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              tem = simplify_gen_binary (MULT, mode, lhs,
                                         GEN_INT (coeff0 + coeff1));
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                     ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
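      /* Examples of the PLUS rules above: (plus (mult x 4) x) combines
         the coefficients into (mult x 5), and (plus (ashift x 2) x) is
         handled the same way since the shift reads as "times 4"; both
         fire only when the result is no more costly than the original.  */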
    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (! FLOAT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            coeff0 = -1, lhs = XEXP (lhs, 0);
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            coeff1 = - 1, rhs = XEXP (rhs, 0);
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              tem = simplify_gen_binary (MULT, mode, lhs,
                                         GEN_INT (coeff0 - coeff1));
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                     ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
        return tem;

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
      break;
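      /* E.g. (minus x (and x (const_int 12))) becomes
         (and x (const_int -13)), clearing exactly the bits that the
         AND had kept.  */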
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return constm1_rtx;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (trueop0 == trueop1
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return const0_rtx;

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == const0_rtx && ! side_effects_p (op0))
        return const0_rtx;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (trueop0 == trueop1 && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return const0_rtx;

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                         == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
        return side_effects_p (op1)
               ? simplify_gen_binary (AND, mode, op1, const0_rtx)
               : const0_rtx;
      /* x/1 is x.  */
      if (trueop1 == const1_rtx)
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0.  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
        return side_effects_p (op1)
               ? simplify_gen_binary (AND, mode, op1, const0_rtx)
               : const0_rtx;
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == const1_rtx)
        return side_effects_p (op0)
               ? simplify_gen_binary (AND, mode, op0, const0_rtx)
               : const0_rtx;
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
        return side_effects_p (op1)
               ? simplify_gen_binary (AND, mode, op1, const0_rtx)
               : const0_rtx;
      /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
      if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
        return side_effects_p (op0)
               ? simplify_gen_binary (AND, mode, op0, const0_rtx)
               : const0_rtx;
      break;
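      /* The AND trick above means e.g. (umod x (const_int 16)) becomes
         (and x (const_int 15)), avoiding a division entirely.  */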
    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

      /* Fall through....  */

    case ASHIFT:
    case LSHIFTRT:
      if (trueop1 == const0_rtx)
        return op0;
      if (trueop0 == const0_rtx && ! side_effects_p (op1))
        return op0;
      break;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == const0_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;
1877 case VEC_SELECT:
1878 if (!VECTOR_MODE_P (mode))
1880 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1881 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1882 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1883 gcc_assert (XVECLEN (trueop1, 0) == 1);
1884 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1886 if (GET_CODE (trueop0) == CONST_VECTOR)
1887 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1888 (trueop1, 0, 0)));
1890 else
1892 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1893 gcc_assert (GET_MODE_INNER (mode)
1894 == GET_MODE_INNER (GET_MODE (trueop0)));
1895 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1897 if (GET_CODE (trueop0) == CONST_VECTOR)
1899 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1900 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1901 rtvec v = rtvec_alloc (n_elts);
1902 unsigned int i;
1904 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1905 for (i = 0; i < n_elts; i++)
1907 rtx x = XVECEXP (trueop1, 0, i);
1909 gcc_assert (GET_CODE (x) == CONST_INT);
1910 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
1911 INTVAL (x));
1914 return gen_rtx_CONST_VECTOR (mode, v);
1917 return 0;
1918 case VEC_CONCAT:
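/* Scalar operands such as CONST_INT carry VOIDmode, so when an
   operand has no mode of its own we take the element mode of the
   result vector instead.  */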
1920 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1921 ? GET_MODE (trueop0)
1922 : GET_MODE_INNER (mode));
1923 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1924 ? GET_MODE (trueop1)
1925 : GET_MODE_INNER (mode));
1927 gcc_assert (VECTOR_MODE_P (mode));
1928 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1929 == GET_MODE_SIZE (mode));
1931 if (VECTOR_MODE_P (op0_mode))
1932 gcc_assert (GET_MODE_INNER (mode)
1933 == GET_MODE_INNER (op0_mode));
1934 else
1935 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
1937 if (VECTOR_MODE_P (op1_mode))
1938 gcc_assert (GET_MODE_INNER (mode)
1939 == GET_MODE_INNER (op1_mode));
1940 else
1941 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
1943 if ((GET_CODE (trueop0) == CONST_VECTOR
1944 || GET_CODE (trueop0) == CONST_INT
1945 || GET_CODE (trueop0) == CONST_DOUBLE)
1946 && (GET_CODE (trueop1) == CONST_VECTOR
1947 || GET_CODE (trueop1) == CONST_INT
1948 || GET_CODE (trueop1) == CONST_DOUBLE))
1950 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1951 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1952 rtvec v = rtvec_alloc (n_elts);
1953 unsigned int i;
1954 unsigned in_n_elts = 1;
1956 if (VECTOR_MODE_P (op0_mode))
1957 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1958 for (i = 0; i < n_elts; i++)
1960 if (i < in_n_elts)
1962 if (!VECTOR_MODE_P (op0_mode))
1963 RTVEC_ELT (v, i) = trueop0;
1964 else
1965 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1967 else
1969 if (!VECTOR_MODE_P (op1_mode))
1970 RTVEC_ELT (v, i) = trueop1;
1971 else
1972 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1973 i - in_n_elts);
1977 return gen_rtx_CONST_VECTOR (mode, v);
1980 return 0;
1982 default:
1983 gcc_unreachable ();
1986 return 0;
1990 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
1991 rtx op0, rtx op1)
1993 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1994 HOST_WIDE_INT val;
1995 unsigned int width = GET_MODE_BITSIZE (mode);
1997 if (VECTOR_MODE_P (mode)
1998 && code != VEC_CONCAT
1999 && GET_CODE (op0) == CONST_VECTOR
2000 && GET_CODE (op1) == CONST_VECTOR)
2002 unsigned n_elts = GET_MODE_NUNITS (mode);
2003 enum machine_mode op0mode = GET_MODE (op0);
2004 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2005 enum machine_mode op1mode = GET_MODE (op1);
2006 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2007 rtvec v = rtvec_alloc (n_elts);
2008 unsigned int i;
2010 gcc_assert (op0_n_elts == n_elts);
2011 gcc_assert (op1_n_elts == n_elts);
2012 for (i = 0; i < n_elts; i++)
2014 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2015 CONST_VECTOR_ELT (op0, i),
2016 CONST_VECTOR_ELT (op1, i));
2017 if (!x)
2018 return 0;
2019 RTVEC_ELT (v, i) = x;
2022 return gen_rtx_CONST_VECTOR (mode, v);
2025 if (VECTOR_MODE_P (mode)
2026 && code == VEC_CONCAT
2027 && CONSTANT_P (op0) && CONSTANT_P (op1))
2029 unsigned n_elts = GET_MODE_NUNITS (mode);
2030 rtvec v = rtvec_alloc (n_elts);
2032 gcc_assert (n_elts >= 2);
2033 if (n_elts == 2)
2035 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2036 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2038 RTVEC_ELT (v, 0) = op0;
2039 RTVEC_ELT (v, 1) = op1;
2041 else
2043 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2044 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2045 unsigned i;
2047 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2048 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2049 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2051 for (i = 0; i < op0_n_elts; ++i)
2052 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2053 for (i = 0; i < op1_n_elts; ++i)
2054 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2057 return gen_rtx_CONST_VECTOR (mode, v);
2060 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2061 && GET_CODE (op0) == CONST_DOUBLE
2062 && GET_CODE (op1) == CONST_DOUBLE
2063 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2065 if (code == AND
2066 || code == IOR
2067 || code == XOR)
2069 long tmp0[4];
2070 long tmp1[4];
2071 REAL_VALUE_TYPE r;
2072 int i;
2074 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2075 GET_MODE (op0));
2076 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2077 GET_MODE (op1));
2078 for (i = 0; i < 4; i++)
2080 switch (code)
2082 case AND:
2083 tmp0[i] &= tmp1[i];
2084 break;
2085 case IOR:
2086 tmp0[i] |= tmp1[i];
2087 break;
2088 case XOR:
2089 tmp0[i] ^= tmp1[i];
2090 break;
2091 default:
2092 gcc_unreachable ();
2095 real_from_target (&r, tmp0, mode);
2096 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2098 else
2100 REAL_VALUE_TYPE f0, f1, value, result;
2101 bool inexact;
2103 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2104 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2105 real_convert (&f0, mode, &f0);
2106 real_convert (&f1, mode, &f1);
2108 if (HONOR_SNANS (mode)
2109 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2110 return 0;
2112 if (code == DIV
2113 && REAL_VALUES_EQUAL (f1, dconst0)
2114 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2115 return 0;
2117 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2118 && flag_trapping_math
2119 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2121 int s0 = REAL_VALUE_NEGATIVE (f0);
2122 int s1 = REAL_VALUE_NEGATIVE (f1);
2124 switch (code)
2126 case PLUS:
2127 /* Inf + -Inf = NaN plus exception. */
2128 if (s0 != s1)
2129 return 0;
2130 break;
2131 case MINUS:
2132 /* Inf - Inf = NaN plus exception. */
2133 if (s0 == s1)
2134 return 0;
2135 break;
2136 case DIV:
2137 /* Inf / Inf = NaN plus exception. */
2138 return 0;
2139 default:
2140 break;
2144 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2145 && flag_trapping_math
2146 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2147 || (REAL_VALUE_ISINF (f1)
2148 && REAL_VALUES_EQUAL (f0, dconst0))))
2149 /* Inf * 0 = NaN plus exception. */
2150 return 0;
2152 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2153 &f0, &f1);
2154 real_convert (&result, mode, &value);
2156 /* Don't constant fold this floating point operation if the
2157 result may depend upon the run-time rounding mode and
2158 flag_rounding_math is set, or if GCC's software emulation
2159 is unable to accurately represent the result. */
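/* For example, 1.0/3.0 is inexact in binary floating point, so under
   -frounding-math it is left for the run time to evaluate in the
   prevailing rounding mode rather than folded here.  */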
2161 if ((flag_rounding_math
2162 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2163 && !flag_unsafe_math_optimizations))
2164 && (inexact || !real_identical (&result, &value)))
2165 return NULL_RTX;
2167 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2171 /* We can fold some multi-word operations. */
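/* For example, when HOST_WIDE_INT is 32 bits wide, a DImode PLUS of
   two constants is evaluated by add_double on (low, high) word pairs.  */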
2172 if (GET_MODE_CLASS (mode) == MODE_INT
2173 && width == HOST_BITS_PER_WIDE_INT * 2
2174 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2175 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2177 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2178 HOST_WIDE_INT h1, h2, hv, ht;
2180 if (GET_CODE (op0) == CONST_DOUBLE)
2181 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2182 else
2183 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2185 if (GET_CODE (op1) == CONST_DOUBLE)
2186 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2187 else
2188 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2190 switch (code)
2192 case MINUS:
2193 /* A - B == A + (-B). */
2194 neg_double (l2, h2, &lv, &hv);
2195 l2 = lv, h2 = hv;
2197 /* Fall through.... */
2199 case PLUS:
2200 add_double (l1, h1, l2, h2, &lv, &hv);
2201 break;
2203 case MULT:
2204 mul_double (l1, h1, l2, h2, &lv, &hv);
2205 break;
2207 case DIV:
2208 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2209 &lv, &hv, &lt, &ht))
2210 return 0;
2211 break;
2213 case MOD:
2214 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2215 &lt, &ht, &lv, &hv))
2216 return 0;
2217 break;
2219 case UDIV:
2220 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2221 &lv, &hv, &lt, &ht))
2222 return 0;
2223 break;
2225 case UMOD:
2226 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2227 &lt, &ht, &lv, &hv))
2228 return 0;
2229 break;
2231 case AND:
2232 lv = l1 & l2, hv = h1 & h2;
2233 break;
2235 case IOR:
2236 lv = l1 | l2, hv = h1 | h2;
2237 break;
2239 case XOR:
2240 lv = l1 ^ l2, hv = h1 ^ h2;
2241 break;
2243 case SMIN:
2244 if (h1 < h2
2245 || (h1 == h2
2246 && ((unsigned HOST_WIDE_INT) l1
2247 < (unsigned HOST_WIDE_INT) l2)))
2248 lv = l1, hv = h1;
2249 else
2250 lv = l2, hv = h2;
2251 break;
2253 case SMAX:
2254 if (h1 > h2
2255 || (h1 == h2
2256 && ((unsigned HOST_WIDE_INT) l1
2257 > (unsigned HOST_WIDE_INT) l2)))
2258 lv = l1, hv = h1;
2259 else
2260 lv = l2, hv = h2;
2261 break;
2263 case UMIN:
2264 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2265 || (h1 == h2
2266 && ((unsigned HOST_WIDE_INT) l1
2267 < (unsigned HOST_WIDE_INT) l2)))
2268 lv = l1, hv = h1;
2269 else
2270 lv = l2, hv = h2;
2271 break;
2273 case UMAX:
2274 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2275 || (h1 == h2
2276 && ((unsigned HOST_WIDE_INT) l1
2277 > (unsigned HOST_WIDE_INT) l2)))
2278 lv = l1, hv = h1;
2279 else
2280 lv = l2, hv = h2;
2281 break;
2283 case LSHIFTRT: case ASHIFTRT:
2284 case ASHIFT:
2285 case ROTATE: case ROTATERT:
2286 if (SHIFT_COUNT_TRUNCATED)
2287 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2289 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2290 return 0;
2292 if (code == LSHIFTRT || code == ASHIFTRT)
2293 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2294 code == ASHIFTRT);
2295 else if (code == ASHIFT)
2296 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2297 else if (code == ROTATE)
2298 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2299 else /* code == ROTATERT */
2300 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2301 break;
2303 default:
2304 return 0;
2307 return immed_double_const (lv, hv, mode);
2310 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2311 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2313 /* Get the integer argument values in two forms:
2314 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
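/* For example, in QImode (width 8) the constant -1 gives
   ARG0 == 0xff but ARG0S == -1.  */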
2316 arg0 = INTVAL (op0);
2317 arg1 = INTVAL (op1);
2319 if (width < HOST_BITS_PER_WIDE_INT)
2321 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2322 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2324 arg0s = arg0;
2325 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2326 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2328 arg1s = arg1;
2329 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2330 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2332 else
2334 arg0s = arg0;
2335 arg1s = arg1;
2338 /* Compute the value of the arithmetic. */
2340 switch (code)
2342 case PLUS:
2343 val = arg0s + arg1s;
2344 break;
2346 case MINUS:
2347 val = arg0s - arg1s;
2348 break;
2350 case MULT:
2351 val = arg0s * arg1s;
2352 break;
2354 case DIV:
2355 if (arg1s == 0
2356 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2357 && arg1s == -1))
2358 return 0;
2359 val = arg0s / arg1s;
2360 break;
2362 case MOD:
2363 if (arg1s == 0
2364 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2365 && arg1s == -1))
2366 return 0;
2367 val = arg0s % arg1s;
2368 break;
2370 case UDIV:
2371 if (arg1 == 0
2372 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2373 && arg1s == -1))
2374 return 0;
2375 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2376 break;
2378 case UMOD:
2379 if (arg1 == 0
2380 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2381 && arg1s == -1))
2382 return 0;
2383 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2384 break;
2386 case AND:
2387 val = arg0 & arg1;
2388 break;
2390 case IOR:
2391 val = arg0 | arg1;
2392 break;
2394 case XOR:
2395 val = arg0 ^ arg1;
2396 break;
2398 case LSHIFTRT:
2399 case ASHIFT:
2400 case ASHIFTRT:
2401 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2402 the value is in range. We can't return any old value for
2403 out-of-range arguments because either the middle-end (via
2404 shift_truncation_mask) or the back-end might be relying on
2405 target-specific knowledge. Nor can we rely on
2406 shift_truncation_mask, since the shift might not be part of an
2407 ashlM3, lshrM3 or ashrM3 instruction. */
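/* For example, with SHIFT_COUNT_TRUNCATED and width == 32, a shift
   count of 33 is reduced to 1; without it, such a count makes us
   refuse to fold rather than guess a value.  */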
2408 if (SHIFT_COUNT_TRUNCATED)
2409 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2410 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2411 return 0;
2413 val = (code == ASHIFT
2414 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2415 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2417 /* Sign-extend the result for arithmetic right shifts. */
2418 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2419 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2420 break;
2422 case ROTATERT:
2423 if (arg1 < 0)
2424 return 0;
2426 arg1 %= width;
2427 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2428 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2429 break;
2431 case ROTATE:
2432 if (arg1 < 0)
2433 return 0;
2435 arg1 %= width;
2436 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2437 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2438 break;
2440 case COMPARE:
2441 /* Do nothing here. */
2442 return 0;
2444 case SMIN:
2445 val = arg0s <= arg1s ? arg0s : arg1s;
2446 break;
2448 case UMIN:
2449 val = ((unsigned HOST_WIDE_INT) arg0
2450 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2451 break;
2453 case SMAX:
2454 val = arg0s > arg1s ? arg0s : arg1s;
2455 break;
2457 case UMAX:
2458 val = ((unsigned HOST_WIDE_INT) arg0
2459 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2460 break;
2462 case SS_PLUS:
2463 case US_PLUS:
2464 case SS_MINUS:
2465 case US_MINUS:
2466 /* ??? There are simplifications that can be done. */
2467 return 0;
2469 default:
2470 gcc_unreachable ();
2473 return gen_int_mode (val, mode);
2476 return NULL_RTX;
2481 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2482 PLUS or MINUS.
2484 Rather than test for specific cases, we do this by a brute-force method
2485 and do all possible simplifications until no more changes occur. Then
2486 we rebuild the operation.
2488 If FORCE is true, then always generate the rtx. This is used to
2489 canonicalize stuff emitted from simplify_gen_binary. Note that this
2490 can still fail if the rtx is too complex. It won't fail just because
2491 the result is not 'simpler' than the input, however. */
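/* For example, in SImode, (plus (minus x y) (plus y (const_int 4)))
   expands into the operand list {+x, -y, +y, +4}; the pairwise pass
   cancels -y against +y, and the result is rebuilt as
   (plus x (const_int 4)).  */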
2493 struct simplify_plus_minus_op_data
2495 rtx op;
2496 int neg;
2499 static int
2500 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2502 const struct simplify_plus_minus_op_data *d1 = p1;
2503 const struct simplify_plus_minus_op_data *d2 = p2;
2505 return (commutative_operand_precedence (d2->op)
2506 - commutative_operand_precedence (d1->op));
2509 static rtx
2510 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2511 rtx op1, int force)
2513 struct simplify_plus_minus_op_data ops[8];
2514 rtx result, tem;
2515 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2516 int first, changed;
2517 int i, j;
2519 memset (ops, 0, sizeof ops);
2521 /* Set up the two operands and then expand them until nothing has been
2522 changed. If we run out of room in our array, give up; this should
2523 almost never happen. */
2525 ops[0].op = op0;
2526 ops[0].neg = 0;
2527 ops[1].op = op1;
2528 ops[1].neg = (code == MINUS);
2532 changed = 0;
2534 for (i = 0; i < n_ops; i++)
2536 rtx this_op = ops[i].op;
2537 int this_neg = ops[i].neg;
2538 enum rtx_code this_code = GET_CODE (this_op);
2540 switch (this_code)
2542 case PLUS:
2543 case MINUS:
2544 if (n_ops == 7)
2545 return NULL_RTX;
2547 ops[n_ops].op = XEXP (this_op, 1);
2548 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2549 n_ops++;
2551 ops[i].op = XEXP (this_op, 0);
2552 input_ops++;
2553 changed = 1;
2554 break;
2556 case NEG:
2557 ops[i].op = XEXP (this_op, 0);
2558 ops[i].neg = ! this_neg;
2559 changed = 1;
2560 break;
2562 case CONST:
2563 if (n_ops < 7
2564 && GET_CODE (XEXP (this_op, 0)) == PLUS
2565 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2566 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2568 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2569 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2570 ops[n_ops].neg = this_neg;
2571 n_ops++;
2572 input_consts++;
2573 changed = 1;
2575 break;
2577 case NOT:
2578 /* ~a -> (-a - 1) */
2579 if (n_ops != 7)
2581 ops[n_ops].op = constm1_rtx;
2582 ops[n_ops++].neg = this_neg;
2583 ops[i].op = XEXP (this_op, 0);
2584 ops[i].neg = !this_neg;
2585 changed = 1;
2587 break;
2589 case CONST_INT:
2590 if (this_neg)
2592 ops[i].op = neg_const_int (mode, this_op);
2593 ops[i].neg = 0;
2594 changed = 1;
2596 break;
2598 default:
2599 break;
2603 while (changed);
2605 /* If we only have two operands, we can't do anything. */
2606 if (n_ops <= 2 && !force)
2607 return NULL_RTX;
2609 /* Count the number of CONSTs we didn't split above. */
2610 for (i = 0; i < n_ops; i++)
2611 if (GET_CODE (ops[i].op) == CONST)
2612 input_consts++;
2614 /* Now simplify each pair of operands until nothing changes. The first
2615 time through just simplify constants against each other. */
2617 first = 1;
2620 changed = first;
2622 for (i = 0; i < n_ops - 1; i++)
2623 for (j = i + 1; j < n_ops; j++)
2625 rtx lhs = ops[i].op, rhs = ops[j].op;
2626 int lneg = ops[i].neg, rneg = ops[j].neg;
2628 if (lhs != 0 && rhs != 0
2629 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2631 enum rtx_code ncode = PLUS;
2633 if (lneg != rneg)
2635 ncode = MINUS;
2636 if (lneg)
2637 tem = lhs, lhs = rhs, rhs = tem;
2639 else if (swap_commutative_operands_p (lhs, rhs))
2640 tem = lhs, lhs = rhs, rhs = tem;
2642 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2644 /* Reject "simplifications" that just wrap the two
2645 arguments in a CONST. Failure to do so can result
2646 in infinite recursion with simplify_binary_operation
2647 when it calls us to simplify CONST operations. */
2648 if (tem
2649 && ! (GET_CODE (tem) == CONST
2650 && GET_CODE (XEXP (tem, 0)) == ncode
2651 && XEXP (XEXP (tem, 0), 0) == lhs
2652 && XEXP (XEXP (tem, 0), 1) == rhs)
2653 /* Don't allow -x + -1 -> ~x simplifications in the
2654 first pass. This allows us the chance to combine
2655 the -1 with other constants. */
2656 && ! (first
2657 && GET_CODE (tem) == NOT
2658 && XEXP (tem, 0) == rhs))
2660 lneg &= rneg;
2661 if (GET_CODE (tem) == NEG)
2662 tem = XEXP (tem, 0), lneg = !lneg;
2663 if (GET_CODE (tem) == CONST_INT && lneg)
2664 tem = neg_const_int (mode, tem), lneg = 0;
2666 ops[i].op = tem;
2667 ops[i].neg = lneg;
2668 ops[j].op = NULL_RTX;
2669 changed = 1;
2674 first = 0;
2676 while (changed);
2678 /* Pack all the operands to the lower-numbered entries. */
2679 for (i = 0, j = 0; j < n_ops; j++)
2680 if (ops[j].op)
2681 ops[i++] = ops[j];
2682 n_ops = i;
2684 /* Sort the operations based on swap_commutative_operands_p. */
2685 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2687 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2688 if (n_ops == 2
2689 && GET_CODE (ops[1].op) == CONST_INT
2690 && CONSTANT_P (ops[0].op)
2691 && ops[0].neg)
2692 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2694 /* We suppressed creation of trivial CONST expressions in the
2695 combination loop to avoid recursion. Create one manually now.
2696 The combination loop should have ensured that there is exactly
2697 one CONST_INT, and the sort will have ensured that it is last
2698 in the array and that any other constant will be next-to-last. */
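/* For example, the trailing operands {(symbol_ref s), (const_int 4)}
   are combined by plus_constant into
   (const (plus (symbol_ref s) (const_int 4))).  */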
2700 if (n_ops > 1
2701 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2702 && CONSTANT_P (ops[n_ops - 2].op))
2704 rtx value = ops[n_ops - 1].op;
2705 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2706 value = neg_const_int (mode, value);
2707 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2708 n_ops--;
2711 /* Count the number of CONSTs that we generated. */
2712 n_consts = 0;
2713 for (i = 0; i < n_ops; i++)
2714 if (GET_CODE (ops[i].op) == CONST)
2715 n_consts++;
2717 /* Give up if we didn't reduce the number of operands we had. Make
2718 sure we count a CONST as two operands. If we have the same
2719 number of operands, but have made more CONSTs than before, this
2720 is also an improvement, so accept it. */
2721 if (!force
2722 && (n_ops + n_consts > input_ops
2723 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2724 return NULL_RTX;
2726 /* Put a non-negated operand first, if possible. */
2728 for (i = 0; i < n_ops && ops[i].neg; i++)
2729 continue;
2730 if (i == n_ops)
2731 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2732 else if (i != 0)
2734 tem = ops[0].op;
2735 ops[0] = ops[i];
2736 ops[i].op = tem;
2737 ops[i].neg = 1;
2740 /* Now make the result by performing the requested operations. */
2741 result = ops[0].op;
2742 for (i = 1; i < n_ops; i++)
2743 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2744 mode, result, ops[i].op);
2746 return result;
2749 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2750 static bool
2751 plus_minus_operand_p (rtx x)
2753 return GET_CODE (x) == PLUS
2754 || GET_CODE (x) == MINUS
2755 || (GET_CODE (x) == CONST
2756 && GET_CODE (XEXP (x, 0)) == PLUS
2757 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2758 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2761 /* Like simplify_binary_operation except used for relational operators.
2762 MODE is the mode of the result. If MODE is VOIDmode, the operands
2763 must not both be VOIDmode.
2765 CMP_MODE specifies the mode in which the comparison is done, so it is
2766 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2767 the operands or, if both are VOIDmode, the operands are compared in
2768 "infinite precision". */
2770 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2771 enum machine_mode cmp_mode, rtx op0, rtx op1)
2773 rtx tem, trueop0, trueop1;
2775 if (cmp_mode == VOIDmode)
2776 cmp_mode = GET_MODE (op0);
2777 if (cmp_mode == VOIDmode)
2778 cmp_mode = GET_MODE (op1);
2780 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2781 if (tem)
2783 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2785 if (tem == const0_rtx)
2786 return CONST0_RTX (mode);
2787 #ifdef FLOAT_STORE_FLAG_VALUE
2789 REAL_VALUE_TYPE val;
2790 val = FLOAT_STORE_FLAG_VALUE (mode);
2791 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2793 #else
2794 return NULL_RTX;
2795 #endif
2797 if (VECTOR_MODE_P (mode))
2799 if (tem == const0_rtx)
2800 return CONST0_RTX (mode);
2801 #ifdef VECTOR_STORE_FLAG_VALUE
2803 int i, units;
2804 rtvec v;
2806 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2807 if (val == NULL_RTX)
2808 return NULL_RTX;
2809 if (val == const1_rtx)
2810 return CONST1_RTX (mode);
2812 units = GET_MODE_NUNITS (mode);
2813 v = rtvec_alloc (units);
2814 for (i = 0; i < units; i++)
2815 RTVEC_ELT (v, i) = val;
2816 return gen_rtx_raw_CONST_VECTOR (mode, v);
2818 #else
2819 return NULL_RTX;
2820 #endif
2823 return tem;
2826 /* For the following tests, ensure const0_rtx is op1. */
2827 if (swap_commutative_operands_p (op0, op1)
2828 || (op0 == const0_rtx && op1 != const0_rtx))
2829 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2831 /* If op0 is a compare, extract the comparison arguments from it. */
2832 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2833 return simplify_relational_operation (code, mode, VOIDmode,
2834 XEXP (op0, 0), XEXP (op0, 1));
2836 if (mode == VOIDmode
2837 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2838 || CC0_P (op0))
2839 return NULL_RTX;
2841 trueop0 = avoid_constant_pool_reference (op0);
2842 trueop1 = avoid_constant_pool_reference (op1);
2843 return simplify_relational_operation_1 (code, mode, cmp_mode,
2844 trueop0, trueop1);
2847 /* This part of simplify_relational_operation is only used when CMP_MODE
2848 is not in class MODE_CC (i.e. it is a real comparison).
2850 MODE is the mode of the result, while CMP_MODE specifies the mode
2851 in which the comparison is done, so it is the mode of the operands. */
2853 static rtx
2854 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2855 enum machine_mode cmp_mode, rtx op0, rtx op1)
2857 enum rtx_code op0code = GET_CODE (op0);
2859 if (GET_CODE (op1) == CONST_INT)
2861 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2863 /* If op0 is a comparison, extract the comparison arguments from it. */
2864 if (code == NE)
2866 if (GET_MODE (op0) == mode)
2867 return simplify_rtx (op0);
2868 else
2869 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2870 XEXP (op0, 0), XEXP (op0, 1));
2872 else if (code == EQ)
2874 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2875 if (new_code != UNKNOWN)
2876 return simplify_gen_relational (new_code, mode, VOIDmode,
2877 XEXP (op0, 0), XEXP (op0, 1));
2882 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2883 if ((code == EQ || code == NE)
2884 && (op0code == PLUS || op0code == MINUS)
2885 && CONSTANT_P (op1)
2886 && CONSTANT_P (XEXP (op0, 1))
2887 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2889 rtx x = XEXP (op0, 0);
2890 rtx c = XEXP (op0, 1);
2892 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2893 cmp_mode, op1, c);
2894 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2897 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2898 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2899 if (code == NE
2900 && op1 == const0_rtx
2901 && GET_MODE_CLASS (mode) == MODE_INT
2902 && cmp_mode != VOIDmode
2903 /* ??? Work-around BImode bugs in the ia64 backend. */
2904 && mode != BImode
2905 && cmp_mode != BImode
2906 && nonzero_bits (op0, cmp_mode) == 1
2907 && STORE_FLAG_VALUE == 1)
2908 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2909 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2910 : lowpart_subreg (mode, op0, cmp_mode);
2912 return NULL_RTX;
2915 /* Check if the given comparison (done in the given MODE) is actually a
2916 tautology or a contradiction.
2917 If no simplification is possible, this function returns zero.
2918 Otherwise, it returns either const_true_rtx or const0_rtx. */
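/* For example, (ltu x (const_int 0)) is a contradiction, since no
   unsigned value is less than zero, and so folds to const0_rtx.  */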
2921 simplify_const_relational_operation (enum rtx_code code,
2922 enum machine_mode mode,
2923 rtx op0, rtx op1)
2925 int equal, op0lt, op0ltu, op1lt, op1ltu;
2926 rtx tem;
2927 rtx trueop0;
2928 rtx trueop1;
2930 gcc_assert (mode != VOIDmode
2931 || (GET_MODE (op0) == VOIDmode
2932 && GET_MODE (op1) == VOIDmode));
2934 /* If op0 is a compare, extract the comparison arguments from it. */
2935 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2936 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2938 /* We can't simplify MODE_CC values since we don't know what the
2939 actual comparison is. */
2940 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2941 return 0;
2943 /* Make sure the constant is second. */
2944 if (swap_commutative_operands_p (op0, op1))
2946 tem = op0, op0 = op1, op1 = tem;
2947 code = swap_condition (code);
2950 trueop0 = avoid_constant_pool_reference (op0);
2951 trueop1 = avoid_constant_pool_reference (op1);
2953 /* For integer comparisons of A and B maybe we can simplify A - B and can
2954 then simplify a comparison of that with zero. If A and B are both either
2955 a register or a CONST_INT, this can't help; testing for these cases will
2956 prevent infinite recursion here and speed things up.
2958 If CODE is an unsigned comparison, then we can never do this optimization,
2959 because it gives an incorrect result if the subtraction wraps around zero.
2960 ANSI C defines unsigned operations such that they never overflow, and
2961 thus such cases cannot be ignored; nor can we do it even for signed
2962 comparisons in languages such as Java where overflow wraps, so test flag_wrapv. */
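/* For example, (gt (plus x (const_int 1)) x) reduces via the
   subtraction to (gt (const_int 1) (const_int 0)), i.e. true; this is
   only valid when signed overflow is undefined, hence the !flag_wrapv
   test below.  */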
2964 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2965 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2966 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2967 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2968 /* We cannot do this for == or != if tem is a nonzero address. */
2969 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2970 && code != GTU && code != GEU && code != LTU && code != LEU)
2971 return simplify_const_relational_operation (signed_condition (code),
2972 mode, tem, const0_rtx);
2974 if (flag_unsafe_math_optimizations && code == ORDERED)
2975 return const_true_rtx;
2977 if (flag_unsafe_math_optimizations && code == UNORDERED)
2978 return const0_rtx;
2980 /* For modes without NaNs, if the two operands are equal, we know the
2981 result except if they have side-effects. */
2982 if (! HONOR_NANS (GET_MODE (trueop0))
2983 && rtx_equal_p (trueop0, trueop1)
2984 && ! side_effects_p (trueop0))
2985 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2987 /* If the operands are floating-point constants, see if we can fold
2988 the result. */
2989 else if (GET_CODE (trueop0) == CONST_DOUBLE
2990 && GET_CODE (trueop1) == CONST_DOUBLE
2991 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2993 REAL_VALUE_TYPE d0, d1;
2995 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2996 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2998 /* Comparisons are unordered iff at least one of the values is NaN. */
2999 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3000 switch (code)
3002 case UNEQ:
3003 case UNLT:
3004 case UNGT:
3005 case UNLE:
3006 case UNGE:
3007 case NE:
3008 case UNORDERED:
3009 return const_true_rtx;
3010 case EQ:
3011 case LT:
3012 case GT:
3013 case LE:
3014 case GE:
3015 case LTGT:
3016 case ORDERED:
3017 return const0_rtx;
3018 default:
3019 return 0;
3022 equal = REAL_VALUES_EQUAL (d0, d1);
3023 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3024 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3027 /* Otherwise, see if the operands are both integers. */
3028 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3029 && (GET_CODE (trueop0) == CONST_DOUBLE
3030 || GET_CODE (trueop0) == CONST_INT)
3031 && (GET_CODE (trueop1) == CONST_DOUBLE
3032 || GET_CODE (trueop1) == CONST_INT))
3034 int width = GET_MODE_BITSIZE (mode);
3035 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3036 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3038 /* Get the two words comprising each integer constant. */
3039 if (GET_CODE (trueop0) == CONST_DOUBLE)
3041 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3042 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3044 else
3046 l0u = l0s = INTVAL (trueop0);
3047 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3050 if (GET_CODE (trueop1) == CONST_DOUBLE)
3052 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3053 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3055 else
3057 l1u = l1s = INTVAL (trueop1);
3058 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3061 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3062 we have to sign or zero-extend the values. */
3063 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3065 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3066 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3068 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3069 l0s |= ((HOST_WIDE_INT) (-1) << width);
3071 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3072 l1s |= ((HOST_WIDE_INT) (-1) << width);
3074 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3075 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3077 equal = (h0u == h1u && l0u == l1u);
3078 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3079 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3080 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3081 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3084 /* Otherwise, there are some code-specific tests we can make. */
3085 else
3087 /* Optimize comparisons with upper and lower bounds. */
3088 if (SCALAR_INT_MODE_P (mode)
3089 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3091 rtx mmin, mmax;
3092 int sign;
3094 if (code == GEU
3095 || code == LEU
3096 || code == GTU
3097 || code == LTU)
3098 sign = 0;
3099 else
3100 sign = 1;
3102 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3104 tem = NULL_RTX;
3105 switch (code)
3107 case GEU:
3108 case GE:
3109 /* x >= min is always true. */
3110 if (rtx_equal_p (trueop1, mmin))
3111 tem = const_true_rtx;
3112 else
3113 break;
3115 case LEU:
3116 case LE:
3117 /* x <= max is always true. */
3118 if (rtx_equal_p (trueop1, mmax))
3119 tem = const_true_rtx;
3120 break;
3122 case GTU:
3123 case GT:
3124 /* x > max is always false. */
3125 if (rtx_equal_p (trueop1, mmax))
3126 tem = const0_rtx;
3127 break;
3129 case LTU:
3130 case LT:
3131 /* x < min is always false. */
3132 if (rtx_equal_p (trueop1, mmin))
3133 tem = const0_rtx;
3134 break;
3136 default:
3137 break;
3139 if (tem == const0_rtx
3140 || tem == const_true_rtx)
3141 return tem;
3144 switch (code)
3146 case EQ:
3147 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3148 return const0_rtx;
3149 break;
3151 case NE:
3152 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3153 return const_true_rtx;
3154 break;
3156 case LT:
3157 /* Optimize abs(x) < 0.0. */
3158 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3160 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3161 : trueop0;
3162 if (GET_CODE (tem) == ABS)
3163 return const0_rtx;
3165 break;
3167 case GE:
3168 /* Optimize abs(x) >= 0.0. */
3169 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3171 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3172 : trueop0;
3173 if (GET_CODE (tem) == ABS)
3174 return const_true_rtx;
3176 break;
3178 case UNGE:
3179 /* Optimize ! (abs(x) < 0.0). */
3180 if (trueop1 == CONST0_RTX (mode))
3182 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3183 : trueop0;
3184 if (GET_CODE (tem) == ABS)
3185 return const_true_rtx;
3187 break;
3189 default:
3190 break;
3193 return 0;
3196 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3197 as appropriate. */
3198 switch (code)
3200 case EQ:
3201 case UNEQ:
3202 return equal ? const_true_rtx : const0_rtx;
3203 case NE:
3204 case LTGT:
3205 return ! equal ? const_true_rtx : const0_rtx;
3206 case LT:
3207 case UNLT:
3208 return op0lt ? const_true_rtx : const0_rtx;
3209 case GT:
3210 case UNGT:
3211 return op1lt ? const_true_rtx : const0_rtx;
3212 case LTU:
3213 return op0ltu ? const_true_rtx : const0_rtx;
3214 case GTU:
3215 return op1ltu ? const_true_rtx : const0_rtx;
3216 case LE:
3217 case UNLE:
3218 return equal || op0lt ? const_true_rtx : const0_rtx;
3219 case GE:
3220 case UNGE:
3221 return equal || op1lt ? const_true_rtx : const0_rtx;
3222 case LEU:
3223 return equal || op0ltu ? const_true_rtx : const0_rtx;
3224 case GEU:
3225 return equal || op1ltu ? const_true_rtx : const0_rtx;
3226 case ORDERED:
3227 return const_true_rtx;
3228 case UNORDERED:
3229 return const0_rtx;
3230 default:
3231 gcc_unreachable ();
3235 /* Simplify CODE, an operation with result mode MODE and three operands,
3236 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3237 a constant. Return 0 if no simplification is possible. */
3240 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3241 enum machine_mode op0_mode, rtx op0, rtx op1,
3242 rtx op2)
3244 unsigned int width = GET_MODE_BITSIZE (mode);
3246 /* VOIDmode means "infinite" precision. */
3247 if (width == 0)
3248 width = HOST_BITS_PER_WIDE_INT;
3250 switch (code)
3252 case SIGN_EXTRACT:
3253 case ZERO_EXTRACT:
3254 if (GET_CODE (op0) == CONST_INT
3255 && GET_CODE (op1) == CONST_INT
3256 && GET_CODE (op2) == CONST_INT
3257 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3258 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3260 /* Extracting a bit-field from a constant. */
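/* For example, on a !BITS_BIG_ENDIAN target,
   (zero_extract:SI (const_int 0xff) (const_int 4) (const_int 2))
   shifts right by 2 and masks to 4 bits, yielding (const_int 0xf).  */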
3261 HOST_WIDE_INT val = INTVAL (op0);
3263 if (BITS_BIG_ENDIAN)
3264 val >>= (GET_MODE_BITSIZE (op0_mode)
3265 - INTVAL (op2) - INTVAL (op1));
3266 else
3267 val >>= INTVAL (op2);
3269 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3271 /* First zero-extend. */
3272 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3273 /* If desired, propagate sign bit. */
3274 if (code == SIGN_EXTRACT
3275 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3276 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3279 /* Clear the bits that don't belong in our mode,
3280 unless they and our sign bit are all one.
3281 So we get either a reasonable negative value or a reasonable
3282 unsigned value for this mode. */
3283 if (width < HOST_BITS_PER_WIDE_INT
3284 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3285 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3286 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3288 return gen_int_mode (val, mode);
3290 break;
3292 case IF_THEN_ELSE:
3293 if (GET_CODE (op0) == CONST_INT)
3294 return op0 != const0_rtx ? op1 : op2;
3296 /* Convert c ? a : a into "a". */
3297 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3298 return op1;
3300 /* Convert a != b ? a : b into "a". */
3301 if (GET_CODE (op0) == NE
3302 && ! side_effects_p (op0)
3303 && ! HONOR_NANS (mode)
3304 && ! HONOR_SIGNED_ZEROS (mode)
3305 && ((rtx_equal_p (XEXP (op0, 0), op1)
3306 && rtx_equal_p (XEXP (op0, 1), op2))
3307 || (rtx_equal_p (XEXP (op0, 0), op2)
3308 && rtx_equal_p (XEXP (op0, 1), op1))))
3309 return op1;
3311 /* Convert a == b ? a : b into "b". */
3312 if (GET_CODE (op0) == EQ
3313 && ! side_effects_p (op0)
3314 && ! HONOR_NANS (mode)
3315 && ! HONOR_SIGNED_ZEROS (mode)
3316 && ((rtx_equal_p (XEXP (op0, 0), op1)
3317 && rtx_equal_p (XEXP (op0, 1), op2))
3318 || (rtx_equal_p (XEXP (op0, 0), op2)
3319 && rtx_equal_p (XEXP (op0, 1), op1))))
3320 return op2;
3322 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3324 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3325 ? GET_MODE (XEXP (op0, 1))
3326 : GET_MODE (XEXP (op0, 0)));
3327 rtx temp;
3329 /* Look for happy constants in op1 and op2. */
3330 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3332 HOST_WIDE_INT t = INTVAL (op1);
3333 HOST_WIDE_INT f = INTVAL (op2);
3335 if (t == STORE_FLAG_VALUE && f == 0)
3336 code = GET_CODE (op0);
3337 else if (t == 0 && f == STORE_FLAG_VALUE)
3339 enum rtx_code tmp;
3340 tmp = reversed_comparison_code (op0, NULL_RTX);
3341 if (tmp == UNKNOWN)
3342 break;
3343 code = tmp;
3345 else
3346 break;
3348 return simplify_gen_relational (code, mode, cmp_mode,
3349 XEXP (op0, 0), XEXP (op0, 1));
3352 if (cmp_mode == VOIDmode)
3353 cmp_mode = op0_mode;
3354 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3355 cmp_mode, XEXP (op0, 0),
3356 XEXP (op0, 1));
3358 /* See if any simplifications were possible. */
3359 if (temp)
3361 if (GET_CODE (temp) == CONST_INT)
3362 return temp == const0_rtx ? op2 : op1;
3363 else
3364 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3367 break;
3369 case VEC_MERGE:
3370 gcc_assert (GET_MODE (op0) == mode);
3371 gcc_assert (GET_MODE (op1) == mode);
3372 gcc_assert (VECTOR_MODE_P (mode));
3373 op2 = avoid_constant_pool_reference (op2);
3374 if (GET_CODE (op2) == CONST_INT)
3376 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3377 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3378 int mask = (1 << n_elts) - 1;
3380 if (!(INTVAL (op2) & mask))
3381 return op1;
3382 if ((INTVAL (op2) & mask) == mask)
3383 return op0;
3385 op0 = avoid_constant_pool_reference (op0);
3386 op1 = avoid_constant_pool_reference (op1);
3387 if (GET_CODE (op0) == CONST_VECTOR
3388 && GET_CODE (op1) == CONST_VECTOR)
3390 rtvec v = rtvec_alloc (n_elts);
3391 unsigned int i;
3393 for (i = 0; i < n_elts; i++)
3394 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3395 ? CONST_VECTOR_ELT (op0, i)
3396 : CONST_VECTOR_ELT (op1, i));
3397 return gen_rtx_CONST_VECTOR (mode, v);
3400 break;
3402 default:
3403 gcc_unreachable ();
3406 return 0;
3409 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3410 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3412 Works by unpacking OP into a collection of 8-bit values
3413 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3414 and then repacking them again for OUTERMODE. */
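/* For example, on a little-endian target, (subreg:HI (x:SI) 0) of the
   SImode constant 0x12345678 unpacks into the byte array
   {0x78, 0x56, 0x34, 0x12}, selects bytes 0 and 1, and repacks them
   as (const_int 0x5678).  */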
3416 static rtx
3417 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3418 enum machine_mode innermode, unsigned int byte)
3420 /* We support up to 512-bit values (for V8DFmode). */
3421 enum {
3422 max_bitsize = 512,
3423 value_bit = 8,
3424 value_mask = (1 << value_bit) - 1
3426 unsigned char value[max_bitsize / value_bit];
3427 int value_start;
3428 int i;
3429 int elem;
3431 int num_elem;
3432 rtx * elems;
3433 int elem_bitsize;
3434 rtx result_s;
3435 rtvec result_v = NULL;
3436 enum mode_class outer_class;
3437 enum machine_mode outer_submode;
3439 /* Some ports misuse CCmode. */
3440 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3441 return op;
3443 /* We have no way to represent a complex constant at the rtl level. */
3444 if (COMPLEX_MODE_P (outermode))
3445 return NULL_RTX;
3447 /* Unpack the value. */
3449 if (GET_CODE (op) == CONST_VECTOR)
3451 num_elem = CONST_VECTOR_NUNITS (op);
3452 elems = &CONST_VECTOR_ELT (op, 0);
3453 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3455 else
3457 num_elem = 1;
3458 elems = &op;
3459 elem_bitsize = max_bitsize;
3461 /* If this asserts, it is too complicated; reducing value_bit may help. */
3462 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3463 /* I don't know how to handle endianness of sub-units. */
3464 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3466 for (elem = 0; elem < num_elem; elem++)
3468 unsigned char * vp;
3469 rtx el = elems[elem];
3471 /* Vectors are kept in target memory order. (This is probably
3472 a mistake.) */
3474 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3475 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3476 / BITS_PER_UNIT);
3477 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3478 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3479 unsigned bytele = (subword_byte % UNITS_PER_WORD
3480 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3481 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3484 switch (GET_CODE (el))
3486 case CONST_INT:
3487 for (i = 0;
3488 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3489 i += value_bit)
3490 *vp++ = INTVAL (el) >> i;
3491 /* CONST_INTs are always logically sign-extended. */
3492 for (; i < elem_bitsize; i += value_bit)
3493 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3494 break;
3496 case CONST_DOUBLE:
3497 if (GET_MODE (el) == VOIDmode)
3499 /* If this triggers, someone should have generated a
3500 CONST_INT instead. */
3501 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3503 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3504 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3505 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3507 *vp++
3508 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3509 i += value_bit;
3511 /* It shouldn't matter what's done here, so fill it with
3512 zero. */
3513 for (; i < max_bitsize; i += value_bit)
3514 *vp++ = 0;
3516 else
3518 long tmp[max_bitsize / 32];
3519 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3521 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3522 gcc_assert (bitsize <= elem_bitsize);
3523 gcc_assert (bitsize % value_bit == 0);
3525 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3526 GET_MODE (el));
3528 /* real_to_target produces its result in words affected by
3529 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3530 and use WORDS_BIG_ENDIAN instead; see the documentation
3531 of SUBREG in rtl.texi. */
3532 for (i = 0; i < bitsize; i += value_bit)
3534 int ibase;
3535 if (WORDS_BIG_ENDIAN)
3536 ibase = bitsize - 1 - i;
3537 else
3538 ibase = i;
3539 *vp++ = tmp[ibase / 32] >> i % 32;
3542 /* It shouldn't matter what's done here, so fill it with
3543 zero. */
3544 for (; i < elem_bitsize; i += value_bit)
3545 *vp++ = 0;
3547 break;
3549 default:
3550 gcc_unreachable ();
3554 /* Now, pick the right byte to start with. */
3555 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3556 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3557 will already have offset 0. */
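/* For example, on a big-endian target, (subreg:HI (x:SI) 2) names the
   two least-significant bytes, so BYTE is renumbered from 2 to 0
   here.  */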
3558 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3560 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3561 - byte);
3562 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3563 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3564 byte = (subword_byte % UNITS_PER_WORD
3565 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3568 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3569 so if it's become negative it will instead be very large.) */
3570 gcc_assert (byte < GET_MODE_SIZE (innermode));
3572 /* Convert from bytes to chunks of size value_bit. */
3573 value_start = byte * (BITS_PER_UNIT / value_bit);
3575 /* Re-pack the value. */
3577 if (VECTOR_MODE_P (outermode))
3579 num_elem = GET_MODE_NUNITS (outermode);
3580 result_v = rtvec_alloc (num_elem);
3581 elems = &RTVEC_ELT (result_v, 0);
3582 outer_submode = GET_MODE_INNER (outermode);
3584 else
3586 num_elem = 1;
3587 elems = &result_s;
3588 outer_submode = outermode;
3591 outer_class = GET_MODE_CLASS (outer_submode);
3592 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3594 gcc_assert (elem_bitsize % value_bit == 0);
3595 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3597 for (elem = 0; elem < num_elem; elem++)
3599 unsigned char *vp;
3601 /* Vectors are stored in target memory order. (This is probably
3602 a mistake.) */
3604 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3605 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3606 / BITS_PER_UNIT);
3607 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3608 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3609 unsigned bytele = (subword_byte % UNITS_PER_WORD
3610 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3611 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3614 switch (outer_class)
3616 case MODE_INT:
3617 case MODE_PARTIAL_INT:
3619 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3621 for (i = 0;
3622 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3623 i += value_bit)
3624 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3625 for (; i < elem_bitsize; i += value_bit)
3626 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3627 << (i - HOST_BITS_PER_WIDE_INT));
3629 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3630 know why. */
3631 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3632 elems[elem] = gen_int_mode (lo, outer_submode);
3633 else
3634 elems[elem] = immed_double_const (lo, hi, outer_submode);
3636 break;
3638 case MODE_FLOAT:
3640 REAL_VALUE_TYPE r;
3641 long tmp[max_bitsize / 32];
3643 /* real_from_target wants its input in words affected by
3644 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3645 and use WORDS_BIG_ENDIAN instead; see the documentation
3646 of SUBREG in rtl.texi. */
3647 for (i = 0; i < max_bitsize / 32; i++)
3648 tmp[i] = 0;
3649 for (i = 0; i < elem_bitsize; i += value_bit)
3651 int ibase;
3652 if (WORDS_BIG_ENDIAN)
3653 ibase = elem_bitsize - 1 - i;
3654 else
3655 ibase = i;
3656 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3659 real_from_target (&r, tmp, outer_submode);
3660 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3662 break;
3664 default:
3665 gcc_unreachable ();
3668 if (VECTOR_MODE_P (outermode))
3669 return gen_rtx_CONST_VECTOR (outermode, result_v);
3670 else
3671 return result_s;
3674 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3675 Return 0 if no simplifications are possible. */
3677 simplify_subreg (enum machine_mode outermode, rtx op,
3678 enum machine_mode innermode, unsigned int byte)
3680 /* Little bit of sanity checking. */
3681 gcc_assert (innermode != VOIDmode);
3682 gcc_assert (outermode != VOIDmode);
3683 gcc_assert (innermode != BLKmode);
3684 gcc_assert (outermode != BLKmode);
3686 gcc_assert (GET_MODE (op) == innermode
3687 || GET_MODE (op) == VOIDmode);
3689 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3690 gcc_assert (byte < GET_MODE_SIZE (innermode));
3692 if (outermode == innermode && !byte)
3693 return op;
3695 if (GET_CODE (op) == CONST_INT
3696 || GET_CODE (op) == CONST_DOUBLE
3697 || GET_CODE (op) == CONST_VECTOR)
3698 return simplify_immed_subreg (outermode, op, innermode, byte);
3700 /* Changing mode twice with SUBREG => just change it once,
3701 or not at all if changing back to op's starting mode. */
3702 if (GET_CODE (op) == SUBREG)
3704 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3705 int final_offset = byte + SUBREG_BYTE (op);
3706 rtx newx;
3708 if (outermode == innermostmode
3709 && byte == 0 && SUBREG_BYTE (op) == 0)
3710 return SUBREG_REG (op);
3712 /* The SUBREG_BYTE represents an offset, as if the value were stored
3713 in memory. The irritating exception is a paradoxical subreg, where
3714 we define SUBREG_BYTE to be 0; on big-endian machines this
3715 value should be negative. For a moment, undo this exception. */
3716 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3718 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3719 if (WORDS_BIG_ENDIAN)
3720 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3721 if (BYTES_BIG_ENDIAN)
3722 final_offset += difference % UNITS_PER_WORD;
3724 if (SUBREG_BYTE (op) == 0
3725 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3727 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3728 if (WORDS_BIG_ENDIAN)
3729 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3730 if (BYTES_BIG_ENDIAN)
3731 final_offset += difference % UNITS_PER_WORD;
3734 /* See whether resulting subreg will be paradoxical. */
3735 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3737 /* In nonparadoxical subregs we can't handle negative offsets. */
3738 if (final_offset < 0)
3739 return NULL_RTX;
3740 /* Bail out in case resulting subreg would be incorrect. */
3741 if (final_offset % GET_MODE_SIZE (outermode)
3742 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3743 return NULL_RTX;
3745 else
3747 int offset = 0;
3748 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3750 /* In a paradoxical subreg, see if we are still looking at the lower part.
3751 If so, our SUBREG_BYTE will be 0. */
3752 if (WORDS_BIG_ENDIAN)
3753 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3754 if (BYTES_BIG_ENDIAN)
3755 offset += difference % UNITS_PER_WORD;
3756 if (offset == final_offset)
3757 final_offset = 0;
3758 else
3759 return NULL_RTX;
3762 /* Recurse for further possible simplifications. */
3763 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3764 final_offset);
3765 if (newx)
3766 return newx;
3767 if (validate_subreg (outermode, innermostmode,
3768 SUBREG_REG (op), final_offset))
3769 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3770 return NULL_RTX;
3773 /* SUBREG of a hard register => just change the register number
3774 and/or mode. If the hard register is not valid in that mode,
3775 suppress this simplification. If the hard register is the stack,
3776 frame, or argument pointer, leave this as a SUBREG. */
3778 if (REG_P (op)
3779 && REGNO (op) < FIRST_PSEUDO_REGISTER
3780 #ifdef CANNOT_CHANGE_MODE_CLASS
3781 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3782 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3783 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3784 #endif
3785 && ((reload_completed && !frame_pointer_needed)
3786 || (REGNO (op) != FRAME_POINTER_REGNUM
3787 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3788 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3789 #endif
3791 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3792 && REGNO (op) != ARG_POINTER_REGNUM
3793 #endif
3794 && REGNO (op) != STACK_POINTER_REGNUM
3795 && subreg_offset_representable_p (REGNO (op), innermode,
3796 byte, outermode))
3798 unsigned int regno = REGNO (op);
3799 unsigned int final_regno
3800 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3802 /* ??? We do allow it if the current REG is not valid for
3803 its mode. This is a kludge to work around how float/complex
3804 arguments are passed on 32-bit SPARC and should be fixed. */
3805 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3806 || ! HARD_REGNO_MODE_OK (regno, innermode))
3808 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3810 /* Propagate original regno. We don't have any way to specify
3811 the offset inside original regno, so do so only for lowpart.
3812 The information is used only by alias analysis, which cannot
3813 grok partial registers anyway. */
3815 if (subreg_lowpart_offset (outermode, innermode) == byte)
3816 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3817 return x;
3821 /* If we have a SUBREG of a register that we are replacing and we are
3822 replacing it with a MEM, make a new MEM and try replacing the
3823 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3824 or if we would be widening it. */
3826 if (MEM_P (op)
3827 && ! mode_dependent_address_p (XEXP (op, 0))
3828 /* Allow splitting of volatile memory references in case we don't
3829 have an instruction to move the whole thing. */
3830 && (! MEM_VOLATILE_P (op)
3831 || ! have_insn_for (SET, innermode))
3832 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3833 return adjust_address_nv (op, outermode, byte);
3835 /* Handle complex values represented as CONCAT
3836 of real and imaginary part. */
3837 if (GET_CODE (op) == CONCAT)
3839 unsigned int inner_size, final_offset;
3840 rtx part, res;
3842 inner_size = GET_MODE_UNIT_SIZE (innermode);
3843 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3844 final_offset = byte % inner_size;
3845 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3846 return NULL_RTX;
3848 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3849 if (res)
3850 return res;
3851 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3852 return gen_rtx_SUBREG (outermode, part, final_offset);
3853 return NULL_RTX;
3856 /* Optimize SUBREG truncations of zero and sign extended values. */
3857 if ((GET_CODE (op) == ZERO_EXTEND
3858 || GET_CODE (op) == SIGN_EXTEND)
3859 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3861 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3863 /* If we're requesting the lowpart of a zero or sign extension,
3864 there are three possibilities. If the outermode is the same
3865 as the origmode, we can omit both the extension and the subreg.
3866 If the outermode is not larger than the origmode, we can apply
3867 the truncation without the extension. Finally, if the outermode
3868 is larger than the origmode, but both are integer modes, we
3869 can just extend to the appropriate mode. */
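/* For example, (subreg:HI (zero_extend:SI (x:HI)) 0) is x itself;
   on a little-endian target, (subreg:QI (zero_extend:SI (x:HI)) 0)
   becomes (subreg:QI (x:HI) 0); and (subreg:SI (zero_extend:DI (x:QI)) 0)
   becomes (zero_extend:SI (x:QI)).  */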
3870 if (bitpos == 0)
3872 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3873 if (outermode == origmode)
3874 return XEXP (op, 0);
3875 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3876 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3877 subreg_lowpart_offset (outermode,
3878 origmode));
3879 if (SCALAR_INT_MODE_P (outermode))
3880 return simplify_gen_unary (GET_CODE (op), outermode,
3881 XEXP (op, 0), origmode);
3884 /* A SUBREG resulting from a zero extension may fold to zero if
3885 it extracts bits higher than the ZERO_EXTEND's source bits. */
3886 if (GET_CODE (op) == ZERO_EXTEND
3887 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3888 return CONST0_RTX (outermode);
3891 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
3892 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3893 the outer subreg is effectively a truncation to the original mode. */
3894 if ((GET_CODE (op) == LSHIFTRT
3895 || GET_CODE (op) == ASHIFTRT)
3896 && SCALAR_INT_MODE_P (outermode)
3897 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
3898 to avoid the possibility that an outer LSHIFTRT shifts by more
3899 than the sign extension's sign_bit_copies and introduces zeros
3900 into the high bits of the result. */
3901 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3902 && GET_CODE (XEXP (op, 1)) == CONST_INT
3903 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3904 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3905 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3906 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3907 return simplify_gen_binary (ASHIFTRT, outermode,
3908 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3910 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
3911 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3912 the outer subreg is effectively a truncation to the original mode. */
3913 if ((GET_CODE (op) == LSHIFTRT
3914 || GET_CODE (op) == ASHIFTRT)
3915 && SCALAR_INT_MODE_P (outermode)
3916 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3917 && GET_CODE (XEXP (op, 1)) == CONST_INT
3918 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3920 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3921 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3922 return simplify_gen_binary (LSHIFTRT, outermode,
3923 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
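
  /* E.g. under the same mode assumptions as above, the bits shifted
     into the low byte are all zeros, so
         (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI x))
                                 (const_int 3)) 0)
         -> (lshiftrt:QI (reg:QI x) (const_int 3)).  */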

  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
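
  /* E.g. (editor's example) the low 8 bits of the wide shift and of the
     narrow shift agree, so the truncation is exact:
         (subreg:QI (ashift:SI (zero_extend:SI (reg:QI x))
                               (const_int 2)) 0)
         -> (ashift:QI (reg:QI x) (const_int 2)).  */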

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
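
/* An illustrative caller sketch (editor's example, not code from this
   file; register number 100 is arbitrary, and byte offset 0 is the low
   byte on a little-endian target):

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx low = simplify_gen_subreg (QImode, reg, SImode, 0);

   Here LOW is (subreg:QI (reg:SI 100) 0) when no further folding
   applies, and NULL_RTX only if no valid subreg can be formed.  */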

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;