/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))

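/* For example, on a host where HOST_WIDE_INT is 64 bits wide, the
   double-width value -2 is carried as the pair
   (low = 0xfffffffffffffffe, high = -1); HWI_SIGN_EXTEND (low)
   recovers that -1 high word, while for low = 2 it yields 0.  */
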
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

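/* For example, negating (const_int -128) in QImode yields
   (const_int -128) again: 128 does not fit in the signed 8-bit
   range, so gen_int_mode truncates it back.  */
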
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

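/* For example, in 32-bit SImode this accepts only a CONST_INT whose
   low 32 bits are 0x80000000; for a double-word mode carried as a
   CONST_DOUBLE, the low word must be zero and the high word must be
   the sign bit of the remaining width.  */
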
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

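/* Example: simplify_gen_binary (PLUS, SImode, reg, const0_rtx)
   returns the register itself via simplify_binary_operation, while
   two distinct pseudos that fold no further come back from
   gen_rtx_fmt_ee as (plus:SI reg0 reg1).  */
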
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}

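/* For example, a (mem (symbol_ref ...)) that addresses a pool entry
   holding an SFmode CONST_DOUBLE comes back as that CONST_DOUBLE;
   any MEM whose address is not a constant-pool SYMBOL_REF is
   returned unchanged.  */
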
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}

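/* Example: replacing (reg 100) with (const_int 4) in
   (plus:SI (reg 100) (const_int 3)) does not just substitute; the
   rebuilt expression is folded on the way out, yielding
   (const_int 7).  */
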
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == -1
          && COMPARISON_P (op)
          && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

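/* Example of the folds above: in SImode,
   (not:SI (plus:SI (reg 100) (const_int -1))) becomes
   (neg:SI (reg 100)), and (neg:SI (neg:SI (reg 100))) collapses
   back to (reg 100).  */
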
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}

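/* Example of the FIX clamping above: folding
   (fix:SI (const_double 1.0e10)) yields (const_int 2147483647), the
   largest signed 32-bit value, while a NaN operand folds to
   (const_int 0).  */
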
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}

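/* Example: (plus (plus (reg 100) (const_int 1)) (const_int 2)) is
   reassociated as "a op (b op c)"; the two constants fold, giving
   (plus (reg 100) (const_int 3)).  */
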
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

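/* Example: simplify_binary_operation (MULT, SImode, (reg 100),
   (const_int 8)) is rewritten by the MULT case below as
   (ashift:SI (reg 100) (const_int 3)).  */
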
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (! FLOAT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                     ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (! FLOAT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                     ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
        return tem;

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      else if (GET_CODE (trueop1) == CONST_DOUBLE
               && (GET_MODE (trueop1) == VOIDmode
                   || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
               && GET_MODE (op0) == mode
               && CONST_DOUBLE_LOW (trueop1) == 0
               && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return constm1_rtx;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (trueop0 == trueop1
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == const0_rtx && ! side_effects_p (op0))
        return const0_rtx;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (trueop0 == trueop1 && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return const0_rtx;

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                         == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
        return side_effects_p (op1)
               ? simplify_gen_binary (AND, mode, op1, const0_rtx)
               : const0_rtx;
      /* x/1 is x.  */
      if (trueop1 == const1_rtx)
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
        return side_effects_p (op1)
               ? simplify_gen_binary (AND, mode, op1, const0_rtx)
               : const0_rtx;
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == const1_rtx)
        return side_effects_p (op0)
               ? simplify_gen_binary (AND, mode, op0, const0_rtx)
               : const0_rtx;
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == const0_rtx)
        return side_effects_p (op1)
               ? simplify_gen_binary (AND, mode, op1, const0_rtx)
               : const0_rtx;
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
        return side_effects_p (op0)
               ? simplify_gen_binary (AND, mode, op0, const0_rtx)
               : const0_rtx;
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

      /* Fall through....  */

    case ASHIFT:
    case LSHIFTRT:
      if (trueop1 == const0_rtx)
        return op0;
      if (trueop0 == const0_rtx && ! side_effects_p (op1))
        return op0;
      break;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == const0_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

1924 case SS_PLUS:
1925 case US_PLUS:
1926 case SS_MINUS:
1927 case US_MINUS:
1928 /* ??? There are simplifications that can be done. */
1929 return 0;
1931 case VEC_SELECT:
1932 if (!VECTOR_MODE_P (mode))
1934 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1935 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
1936 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1937 gcc_assert (XVECLEN (trueop1, 0) == 1);
1938 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
1940 if (GET_CODE (trueop0) == CONST_VECTOR)
1941 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
1942 (trueop1, 0, 0)));
1944 else
1946 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
1947 gcc_assert (GET_MODE_INNER (mode)
1948 == GET_MODE_INNER (GET_MODE (trueop0)));
1949 gcc_assert (GET_CODE (trueop1) == PARALLEL);
1951 if (GET_CODE (trueop0) == CONST_VECTOR)
1953 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1954 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1955 rtvec v = rtvec_alloc (n_elts);
1956 unsigned int i;
1958 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
1959 for (i = 0; i < n_elts; i++)
1961 rtx x = XVECEXP (trueop1, 0, i);
1963 gcc_assert (GET_CODE (x) == CONST_INT);
1964 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
1965 INTVAL (x));
1968 return gen_rtx_CONST_VECTOR (mode, v);
1971 return 0;
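/* E.g., extracting one element of a constant vector:
   (vec_select:SI (const_vector:V4SI [a b c d]) (parallel [(const_int 2)]))
   folds to the constant element c via the CONST_VECTOR cases above.  */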
1972 case VEC_CONCAT:
1974 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1975 ? GET_MODE (trueop0)
1976 : GET_MODE_INNER (mode));
1977 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1978 ? GET_MODE (trueop1)
1979 : GET_MODE_INNER (mode));
1981 gcc_assert (VECTOR_MODE_P (mode));
1982 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1983 == GET_MODE_SIZE (mode));
1985 if (VECTOR_MODE_P (op0_mode))
1986 gcc_assert (GET_MODE_INNER (mode)
1987 == GET_MODE_INNER (op0_mode));
1988 else
1989 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
1991 if (VECTOR_MODE_P (op1_mode))
1992 gcc_assert (GET_MODE_INNER (mode)
1993 == GET_MODE_INNER (op1_mode));
1994 else
1995 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
1997 if ((GET_CODE (trueop0) == CONST_VECTOR
1998 || GET_CODE (trueop0) == CONST_INT
1999 || GET_CODE (trueop0) == CONST_DOUBLE)
2000 && (GET_CODE (trueop1) == CONST_VECTOR
2001 || GET_CODE (trueop1) == CONST_INT
2002 || GET_CODE (trueop1) == CONST_DOUBLE))
2004 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2005 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2006 rtvec v = rtvec_alloc (n_elts);
2007 unsigned int i;
2008 unsigned in_n_elts = 1;
2010 if (VECTOR_MODE_P (op0_mode))
2011 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2012 for (i = 0; i < n_elts; i++)
2014 if (i < in_n_elts)
2016 if (!VECTOR_MODE_P (op0_mode))
2017 RTVEC_ELT (v, i) = trueop0;
2018 else
2019 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2021 else
2023 if (!VECTOR_MODE_P (op1_mode))
2024 RTVEC_ELT (v, i) = trueop1;
2025 else
2026 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2027 i - in_n_elts);
2031 return gen_rtx_CONST_VECTOR (mode, v);
2034 return 0;
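/* E.g., (vec_concat:V2SI (const_int 1) (const_int 2)) builds the
   constant vector [1 2], and concatenating two constant V2SI vectors
   likewise yields a single constant V4SI vector, element by element.  */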
2036 default:
2037 gcc_unreachable ();
2040 return 0;
2043 rtx
2044 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2045 rtx op0, rtx op1)
2047 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2048 HOST_WIDE_INT val;
2049 unsigned int width = GET_MODE_BITSIZE (mode);
2051 if (VECTOR_MODE_P (mode)
2052 && code != VEC_CONCAT
2053 && GET_CODE (op0) == CONST_VECTOR
2054 && GET_CODE (op1) == CONST_VECTOR)
2056 unsigned n_elts = GET_MODE_NUNITS (mode);
2057 enum machine_mode op0mode = GET_MODE (op0);
2058 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2059 enum machine_mode op1mode = GET_MODE (op1);
2060 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2061 rtvec v = rtvec_alloc (n_elts);
2062 unsigned int i;
2064 gcc_assert (op0_n_elts == n_elts);
2065 gcc_assert (op1_n_elts == n_elts);
2066 for (i = 0; i < n_elts; i++)
2068 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2069 CONST_VECTOR_ELT (op0, i),
2070 CONST_VECTOR_ELT (op1, i));
2071 if (!x)
2072 return 0;
2073 RTVEC_ELT (v, i) = x;
2076 return gen_rtx_CONST_VECTOR (mode, v);
2079 if (VECTOR_MODE_P (mode)
2080 && code == VEC_CONCAT
2081 && CONSTANT_P (op0) && CONSTANT_P (op1))
2083 unsigned n_elts = GET_MODE_NUNITS (mode);
2084 rtvec v = rtvec_alloc (n_elts);
2086 gcc_assert (n_elts >= 2);
2087 if (n_elts == 2)
2089 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2090 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2092 RTVEC_ELT (v, 0) = op0;
2093 RTVEC_ELT (v, 1) = op1;
2095 else
2097 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2098 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2099 unsigned i;
2101 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2102 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2103 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2105 for (i = 0; i < op0_n_elts; ++i)
2106 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2107 for (i = 0; i < op1_n_elts; ++i)
2108 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2111 return gen_rtx_CONST_VECTOR (mode, v);
2114 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2115 && GET_CODE (op0) == CONST_DOUBLE
2116 && GET_CODE (op1) == CONST_DOUBLE
2117 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2119 if (code == AND
2120 || code == IOR
2121 || code == XOR)
2123 long tmp0[4];
2124 long tmp1[4];
2125 REAL_VALUE_TYPE r;
2126 int i;
2128 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2129 GET_MODE (op0));
2130 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2131 GET_MODE (op1));
2132 for (i = 0; i < 4; i++)
2134 switch (code)
2136 case AND:
2137 tmp0[i] &= tmp1[i];
2138 break;
2139 case IOR:
2140 tmp0[i] |= tmp1[i];
2141 break;
2142 case XOR:
2143 tmp0[i] ^= tmp1[i];
2144 break;
2145 default:
2146 gcc_unreachable ();
2149 real_from_target (&r, tmp0, mode);
2150 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2152 else
2154 REAL_VALUE_TYPE f0, f1, value, result;
2155 bool inexact;
2157 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2158 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2159 real_convert (&f0, mode, &f0);
2160 real_convert (&f1, mode, &f1);
2162 if (HONOR_SNANS (mode)
2163 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2164 return 0;
2166 if (code == DIV
2167 && REAL_VALUES_EQUAL (f1, dconst0)
2168 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2169 return 0;
2171 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2172 && flag_trapping_math
2173 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2175 int s0 = REAL_VALUE_NEGATIVE (f0);
2176 int s1 = REAL_VALUE_NEGATIVE (f1);
2178 switch (code)
2180 case PLUS:
2181 /* Inf + -Inf = NaN plus exception. */
2182 if (s0 != s1)
2183 return 0;
2184 break;
2185 case MINUS:
2186 /* Inf - Inf = NaN plus exception. */
2187 if (s0 == s1)
2188 return 0;
2189 break;
2190 case DIV:
2191 /* Inf / Inf = NaN plus exception. */
2192 return 0;
2193 default:
2194 break;
2198 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2199 && flag_trapping_math
2200 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2201 || (REAL_VALUE_ISINF (f1)
2202 && REAL_VALUES_EQUAL (f0, dconst0))))
2203 /* Inf * 0 = NaN plus exception. */
2204 return 0;
2206 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2207 &f0, &f1);
2208 real_convert (&result, mode, &value);
2210 /* Don't constant fold this floating point operation if the
2211 result may depend upon the run-time rounding mode and
2212 flag_rounding_math is set, or if GCC's software emulation
2213 is unable to accurately represent the result. */
2215 if ((flag_rounding_math
2216 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2217 && !flag_unsafe_math_optimizations))
2218 && (inexact || !real_identical (&result, &value)))
2219 return NULL_RTX;
2221 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2225 /* We can fold some multi-word operations. */
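/* E.g., on a host whose HOST_WIDE_INT is 32 bits, a DImode constant
   is held as a (low, high) pair of words, and PLUS on two such
   constants goes through add_double on the pairs rather than any
   native 64-bit arithmetic.  */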
2226 if (GET_MODE_CLASS (mode) == MODE_INT
2227 && width == HOST_BITS_PER_WIDE_INT * 2
2228 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2229 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2231 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2232 HOST_WIDE_INT h1, h2, hv, ht;
2234 if (GET_CODE (op0) == CONST_DOUBLE)
2235 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2236 else
2237 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2239 if (GET_CODE (op1) == CONST_DOUBLE)
2240 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2241 else
2242 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2244 switch (code)
2246 case MINUS:
2247 /* A - B == A + (-B). */
2248 neg_double (l2, h2, &lv, &hv);
2249 l2 = lv, h2 = hv;
2251 /* Fall through.... */
2253 case PLUS:
2254 add_double (l1, h1, l2, h2, &lv, &hv);
2255 break;
2257 case MULT:
2258 mul_double (l1, h1, l2, h2, &lv, &hv);
2259 break;
2261 case DIV:
2262 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2263 &lv, &hv, &lt, &ht))
2264 return 0;
2265 break;
2267 case MOD:
2268 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2269 &lt, &ht, &lv, &hv))
2270 return 0;
2271 break;
2273 case UDIV:
2274 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2275 &lv, &hv, &lt, &ht))
2276 return 0;
2277 break;
2279 case UMOD:
2280 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2281 &lt, &ht, &lv, &hv))
2282 return 0;
2283 break;
2285 case AND:
2286 lv = l1 & l2, hv = h1 & h2;
2287 break;
2289 case IOR:
2290 lv = l1 | l2, hv = h1 | h2;
2291 break;
2293 case XOR:
2294 lv = l1 ^ l2, hv = h1 ^ h2;
2295 break;
2297 case SMIN:
2298 if (h1 < h2
2299 || (h1 == h2
2300 && ((unsigned HOST_WIDE_INT) l1
2301 < (unsigned HOST_WIDE_INT) l2)))
2302 lv = l1, hv = h1;
2303 else
2304 lv = l2, hv = h2;
2305 break;
2307 case SMAX:
2308 if (h1 > h2
2309 || (h1 == h2
2310 && ((unsigned HOST_WIDE_INT) l1
2311 > (unsigned HOST_WIDE_INT) l2)))
2312 lv = l1, hv = h1;
2313 else
2314 lv = l2, hv = h2;
2315 break;
2317 case UMIN:
2318 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2319 || (h1 == h2
2320 && ((unsigned HOST_WIDE_INT) l1
2321 < (unsigned HOST_WIDE_INT) l2)))
2322 lv = l1, hv = h1;
2323 else
2324 lv = l2, hv = h2;
2325 break;
2327 case UMAX:
2328 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2329 || (h1 == h2
2330 && ((unsigned HOST_WIDE_INT) l1
2331 > (unsigned HOST_WIDE_INT) l2)))
2332 lv = l1, hv = h1;
2333 else
2334 lv = l2, hv = h2;
2335 break;
2337 case LSHIFTRT: case ASHIFTRT:
2338 case ASHIFT:
2339 case ROTATE: case ROTATERT:
2340 if (SHIFT_COUNT_TRUNCATED)
2341 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2343 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2344 return 0;
2346 if (code == LSHIFTRT || code == ASHIFTRT)
2347 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2348 code == ASHIFTRT);
2349 else if (code == ASHIFT)
2350 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2351 else if (code == ROTATE)
2352 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2353 else /* code == ROTATERT */
2354 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2355 break;
2357 default:
2358 return 0;
2361 return immed_double_const (lv, hv, mode);
2364 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2365 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2367 /* Get the integer argument values in two forms:
2368 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2370 arg0 = INTVAL (op0);
2371 arg1 = INTVAL (op1);
2373 if (width < HOST_BITS_PER_WIDE_INT)
2375 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2376 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2378 arg0s = arg0;
2379 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2380 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2382 arg1s = arg1;
2383 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2384 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2386 else
2388 arg0s = arg0;
2389 arg1s = arg1;
2392 /* Compute the value of the arithmetic. */
2394 switch (code)
2396 case PLUS:
2397 val = arg0s + arg1s;
2398 break;
2400 case MINUS:
2401 val = arg0s - arg1s;
2402 break;
2404 case MULT:
2405 val = arg0s * arg1s;
2406 break;
2408 case DIV:
2409 if (arg1s == 0
2410 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2411 && arg1s == -1))
2412 return 0;
2413 val = arg0s / arg1s;
2414 break;
2416 case MOD:
2417 if (arg1s == 0
2418 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2419 && arg1s == -1))
2420 return 0;
2421 val = arg0s % arg1s;
2422 break;
2424 case UDIV:
2425 if (arg1 == 0
2426 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2427 && arg1s == -1))
2428 return 0;
2429 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2430 break;
2432 case UMOD:
2433 if (arg1 == 0
2434 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2435 && arg1s == -1))
2436 return 0;
2437 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2438 break;
2440 case AND:
2441 val = arg0 & arg1;
2442 break;
2444 case IOR:
2445 val = arg0 | arg1;
2446 break;
2448 case XOR:
2449 val = arg0 ^ arg1;
2450 break;
2452 case LSHIFTRT:
2453 case ASHIFT:
2454 case ASHIFTRT:
2455 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
2456 the value is in range. We can't return any old value for
2457 out-of-range arguments because either the middle-end (via
2458 shift_truncation_mask) or the back-end might be relying on
2459 target-specific knowledge. Nor can we rely on
2460 shift_truncation_mask, since the shift might not be part of an
2461 ashlM3, lshrM3 or ashrM3 instruction. */
2462 if (SHIFT_COUNT_TRUNCATED)
2463 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2464 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2465 return 0;
2467 val = (code == ASHIFT
2468 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2469 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2471 /* Sign-extend the result for arithmetic right shifts. */
2472 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2473 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2474 break;
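/* Worked example of the sign-extension step: in an 8-bit mode,
   arg0 = 0xf0 (arg0s = -16) arithmetically shifted right by 2 first
   yields 0x3c from the logical shift; or-ing in the high bits then
   restores 0xfc, which is -4 == -16 >> 2 in that mode.  */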
2476 case ROTATERT:
2477 if (arg1 < 0)
2478 return 0;
2480 arg1 %= width;
2481 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2482 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2483 break;
2485 case ROTATE:
2486 if (arg1 < 0)
2487 return 0;
2489 arg1 %= width;
2490 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2491 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2492 break;
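/* E.g., in an 8-bit mode, rotating 0xb4 left by 4 computes
   (0xb4 << 4) | (0xb4 >> 4) == 0xb4b, which gen_int_mode below
   truncates to the mode, giving the expected 0x4b.  */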
2494 case COMPARE:
2495 /* Do nothing here. */
2496 return 0;
2498 case SMIN:
2499 val = arg0s <= arg1s ? arg0s : arg1s;
2500 break;
2502 case UMIN:
2503 val = ((unsigned HOST_WIDE_INT) arg0
2504 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2505 break;
2507 case SMAX:
2508 val = arg0s > arg1s ? arg0s : arg1s;
2509 break;
2511 case UMAX:
2512 val = ((unsigned HOST_WIDE_INT) arg0
2513 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2514 break;
2516 case SS_PLUS:
2517 case US_PLUS:
2518 case SS_MINUS:
2519 case US_MINUS:
2520 /* ??? There are simplifications that can be done. */
2521 return 0;
2523 default:
2524 gcc_unreachable ();
2527 return gen_int_mode (val, mode);
2530 return NULL_RTX;
2535 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2536 PLUS or MINUS.
2538 Rather than testing for specific cases, we do this by a brute-force method
2539 and do all possible simplifications until no more changes occur. Then
2540 we rebuild the operation.
2542 If FORCE is true, then always generate the rtx. This is used to
2543 canonicalize stuff emitted from simplify_gen_binary. Note that this
2544 can still fail if the rtx is too complex. It won't fail just because
2545 the result is not 'simpler' than the input, however. */
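/* A small worked example: for (plus (minus x (const_int 1)) (const_int 3))
   the expansion loop tears the operands apart into the list {x, -1, +3},
   the combination loop folds the two constants to +2, and the result is
   rebuilt as (plus x (const_int 2)).  */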
2547 struct simplify_plus_minus_op_data
2549 rtx op;
2550 int neg;
2553 static int
2554 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2556 const struct simplify_plus_minus_op_data *d1 = p1;
2557 const struct simplify_plus_minus_op_data *d2 = p2;
2559 return (commutative_operand_precedence (d2->op)
2560 - commutative_operand_precedence (d1->op));
2563 static rtx
2564 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2565 rtx op1, int force)
2567 struct simplify_plus_minus_op_data ops[8];
2568 rtx result, tem;
2569 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2570 int first, changed;
2571 int i, j;
2573 memset (ops, 0, sizeof ops);
2575 /* Set up the two operands and then expand them until nothing has been
2576 changed. If we run out of room in our array, give up; this should
2577 almost never happen. */
2579 ops[0].op = op0;
2580 ops[0].neg = 0;
2581 ops[1].op = op1;
2582 ops[1].neg = (code == MINUS);
2586 changed = 0;
2588 for (i = 0; i < n_ops; i++)
2590 rtx this_op = ops[i].op;
2591 int this_neg = ops[i].neg;
2592 enum rtx_code this_code = GET_CODE (this_op);
2594 switch (this_code)
2596 case PLUS:
2597 case MINUS:
2598 if (n_ops == 7)
2599 return NULL_RTX;
2601 ops[n_ops].op = XEXP (this_op, 1);
2602 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2603 n_ops++;
2605 ops[i].op = XEXP (this_op, 0);
2606 input_ops++;
2607 changed = 1;
2608 break;
2610 case NEG:
2611 ops[i].op = XEXP (this_op, 0);
2612 ops[i].neg = ! this_neg;
2613 changed = 1;
2614 break;
2616 case CONST:
2617 if (n_ops < 7
2618 && GET_CODE (XEXP (this_op, 0)) == PLUS
2619 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2620 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2622 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2623 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2624 ops[n_ops].neg = this_neg;
2625 n_ops++;
2626 input_consts++;
2627 changed = 1;
2629 break;
2631 case NOT:
2632 /* ~a -> (-a - 1) */
2633 if (n_ops != 7)
2635 ops[n_ops].op = constm1_rtx;
2636 ops[n_ops++].neg = this_neg;
2637 ops[i].op = XEXP (this_op, 0);
2638 ops[i].neg = !this_neg;
2639 changed = 1;
2641 break;
2643 case CONST_INT:
2644 if (this_neg)
2646 ops[i].op = neg_const_int (mode, this_op);
2647 ops[i].neg = 0;
2648 changed = 1;
2650 break;
2652 default:
2653 break;
2657 while (changed);
2659 /* If we only have two operands, we can't do anything. */
2660 if (n_ops <= 2 && !force)
2661 return NULL_RTX;
2663 /* Count the number of CONSTs we didn't split above. */
2664 for (i = 0; i < n_ops; i++)
2665 if (GET_CODE (ops[i].op) == CONST)
2666 input_consts++;
2668 /* Now simplify each pair of operands until nothing changes. The first
2669 time through just simplify constants against each other. */
2671 first = 1;
2674 changed = first;
2676 for (i = 0; i < n_ops - 1; i++)
2677 for (j = i + 1; j < n_ops; j++)
2679 rtx lhs = ops[i].op, rhs = ops[j].op;
2680 int lneg = ops[i].neg, rneg = ops[j].neg;
2682 if (lhs != 0 && rhs != 0
2683 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2685 enum rtx_code ncode = PLUS;
2687 if (lneg != rneg)
2689 ncode = MINUS;
2690 if (lneg)
2691 tem = lhs, lhs = rhs, rhs = tem;
2693 else if (swap_commutative_operands_p (lhs, rhs))
2694 tem = lhs, lhs = rhs, rhs = tem;
2696 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2698 /* Reject "simplifications" that just wrap the two
2699 arguments in a CONST. Failure to do so can result
2700 in infinite recursion with simplify_binary_operation
2701 when it calls us to simplify CONST operations. */
2702 if (tem
2703 && ! (GET_CODE (tem) == CONST
2704 && GET_CODE (XEXP (tem, 0)) == ncode
2705 && XEXP (XEXP (tem, 0), 0) == lhs
2706 && XEXP (XEXP (tem, 0), 1) == rhs)
2707 /* Don't allow -x + -1 -> ~x simplifications in the
2708 first pass. This allows us the chance to combine
2709 the -1 with other constants. */
2710 && ! (first
2711 && GET_CODE (tem) == NOT
2712 && XEXP (tem, 0) == rhs))
2714 lneg &= rneg;
2715 if (GET_CODE (tem) == NEG)
2716 tem = XEXP (tem, 0), lneg = !lneg;
2717 if (GET_CODE (tem) == CONST_INT && lneg)
2718 tem = neg_const_int (mode, tem), lneg = 0;
2720 ops[i].op = tem;
2721 ops[i].neg = lneg;
2722 ops[j].op = NULL_RTX;
2723 changed = 1;
2728 first = 0;
2730 while (changed);
2732 /* Pack all the operands to the lower-numbered entries. */
2733 for (i = 0, j = 0; j < n_ops; j++)
2734 if (ops[j].op)
2735 ops[i++] = ops[j];
2736 n_ops = i;
2738 /* Sort the operations based on swap_commutative_operands_p. */
2739 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2741 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2742 if (n_ops == 2
2743 && GET_CODE (ops[1].op) == CONST_INT
2744 && CONSTANT_P (ops[0].op)
2745 && ops[0].neg)
2746 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2748 /* We suppressed creation of trivial CONST expressions in the
2749 combination loop to avoid recursion. Create one manually now.
2750 The combination loop should have ensured that there is exactly
2751 one CONST_INT, and the sort will have ensured that it is last
2752 in the array and that any other constant will be next-to-last. */
2754 if (n_ops > 1
2755 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2756 && CONSTANT_P (ops[n_ops - 2].op))
2758 rtx value = ops[n_ops - 1].op;
2759 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2760 value = neg_const_int (mode, value);
2761 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2762 n_ops--;
2765 /* Count the number of CONSTs that we generated. */
2766 n_consts = 0;
2767 for (i = 0; i < n_ops; i++)
2768 if (GET_CODE (ops[i].op) == CONST)
2769 n_consts++;
2771 /* Give up if we didn't reduce the number of operands we had. Make
2772 sure we count a CONST as two operands. If we have the same
2773 number of operands, but have made more CONSTs than before, this
2774 is also an improvement, so accept it. */
2775 if (!force
2776 && (n_ops + n_consts > input_ops
2777 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2778 return NULL_RTX;
2780 /* Put a non-negated operand first, if possible. */
2782 for (i = 0; i < n_ops && ops[i].neg; i++)
2783 continue;
2784 if (i == n_ops)
2785 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2786 else if (i != 0)
2788 tem = ops[0].op;
2789 ops[0] = ops[i];
2790 ops[i].op = tem;
2791 ops[i].neg = 1;
2794 /* Now make the result by performing the requested operations. */
2795 result = ops[0].op;
2796 for (i = 1; i < n_ops; i++)
2797 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2798 mode, result, ops[i].op);
2800 return result;
2803 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2804 static bool
2805 plus_minus_operand_p (rtx x)
2807 return GET_CODE (x) == PLUS
2808 || GET_CODE (x) == MINUS
2809 || (GET_CODE (x) == CONST
2810 && GET_CODE (XEXP (x, 0)) == PLUS
2811 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2812 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2815 /* Like simplify_binary_operation except used for relational operators.
2816 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2817 not both be VOIDmode.
2819 CMP_MODE specifies the mode in which the comparison is done, so it is
2820 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2821 the operands or, if both are VOIDmode, the operands are compared in
2822 "infinite precision". */
2823 rtx
2824 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2825 enum machine_mode cmp_mode, rtx op0, rtx op1)
2827 rtx tem, trueop0, trueop1;
2829 if (cmp_mode == VOIDmode)
2830 cmp_mode = GET_MODE (op0);
2831 if (cmp_mode == VOIDmode)
2832 cmp_mode = GET_MODE (op1);
2834 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2835 if (tem)
2837 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2839 if (tem == const0_rtx)
2840 return CONST0_RTX (mode);
2841 #ifdef FLOAT_STORE_FLAG_VALUE
2843 REAL_VALUE_TYPE val;
2844 val = FLOAT_STORE_FLAG_VALUE (mode);
2845 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2847 #else
2848 return NULL_RTX;
2849 #endif
2851 if (VECTOR_MODE_P (mode))
2853 if (tem == const0_rtx)
2854 return CONST0_RTX (mode);
2855 #ifdef VECTOR_STORE_FLAG_VALUE
2857 int i, units;
2858 rtvec v;
2860 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2861 if (val == NULL_RTX)
2862 return NULL_RTX;
2863 if (val == const1_rtx)
2864 return CONST1_RTX (mode);
2866 units = GET_MODE_NUNITS (mode);
2867 v = rtvec_alloc (units);
2868 for (i = 0; i < units; i++)
2869 RTVEC_ELT (v, i) = val;
2870 return gen_rtx_raw_CONST_VECTOR (mode, v);
2872 #else
2873 return NULL_RTX;
2874 #endif
2877 return tem;
2880 /* For the following tests, ensure const0_rtx is op1. */
2881 if (swap_commutative_operands_p (op0, op1)
2882 || (op0 == const0_rtx && op1 != const0_rtx))
2883 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2885 /* If op0 is a compare, extract the comparison arguments from it. */
2886 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2887 return simplify_relational_operation (code, mode, VOIDmode,
2888 XEXP (op0, 0), XEXP (op0, 1));
2890 if (mode == VOIDmode
2891 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2892 || CC0_P (op0))
2893 return NULL_RTX;
2895 trueop0 = avoid_constant_pool_reference (op0);
2896 trueop1 = avoid_constant_pool_reference (op1);
2897 return simplify_relational_operation_1 (code, mode, cmp_mode,
2898 trueop0, trueop1);
2901 /* This part of simplify_relational_operation is only used when CMP_MODE
2902 is not in class MODE_CC (i.e. it is a real comparison).
2904 MODE is the mode of the result, while CMP_MODE specifies the mode
2905 in which the comparison is done, so it is the mode of the operands. */
2907 static rtx
2908 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2909 enum machine_mode cmp_mode, rtx op0, rtx op1)
2911 enum rtx_code op0code = GET_CODE (op0);
2913 if (GET_CODE (op1) == CONST_INT)
2915 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2917 /* If op0 is a comparison, extract the comparison arguments from it. */
2918 if (code == NE)
2920 if (GET_MODE (op0) == mode)
2921 return simplify_rtx (op0);
2922 else
2923 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2924 XEXP (op0, 0), XEXP (op0, 1));
2926 else if (code == EQ)
2928 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2929 if (new_code != UNKNOWN)
2930 return simplify_gen_relational (new_code, mode, VOIDmode,
2931 XEXP (op0, 0), XEXP (op0, 1));
2936 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2937 if ((code == EQ || code == NE)
2938 && (op0code == PLUS || op0code == MINUS)
2939 && CONSTANT_P (op1)
2940 && CONSTANT_P (XEXP (op0, 1))
2941 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2943 rtx x = XEXP (op0, 0);
2944 rtx c = XEXP (op0, 1);
2946 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2947 cmp_mode, op1, c);
2948 return simplify_gen_relational (code, mode, cmp_mode, x, c);
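/* E.g., (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)); the constant is moved across the PLUS by
   computing 7 - 3 with simplify_gen_binary above.  */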
2951 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2952 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2953 if (code == NE
2954 && op1 == const0_rtx
2955 && GET_MODE_CLASS (mode) == MODE_INT
2956 && cmp_mode != VOIDmode
2957 /* ??? Work-around BImode bugs in the ia64 backend. */
2958 && mode != BImode
2959 && cmp_mode != BImode
2960 && nonzero_bits (op0, cmp_mode) == 1
2961 && STORE_FLAG_VALUE == 1)
2962 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2963 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2964 : lowpart_subreg (mode, op0, cmp_mode);
2966 return NULL_RTX;
2969 /* Check if the given comparison (done in the given MODE) is actually a
2970 tautology or a contradiction.
2971 If no simplification is possible, this function returns zero.
2972 Otherwise, it returns either const_true_rtx or const0_rtx. */
2974 rtx
2975 simplify_const_relational_operation (enum rtx_code code,
2976 enum machine_mode mode,
2977 rtx op0, rtx op1)
2979 int equal, op0lt, op0ltu, op1lt, op1ltu;
2980 rtx tem;
2981 rtx trueop0;
2982 rtx trueop1;
2984 gcc_assert (mode != VOIDmode
2985 || (GET_MODE (op0) == VOIDmode
2986 && GET_MODE (op1) == VOIDmode));
2988 /* If op0 is a compare, extract the comparison arguments from it. */
2989 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2990 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2992 /* We can't simplify MODE_CC values since we don't know what the
2993 actual comparison is. */
2994 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2995 return 0;
2997 /* Make sure the constant is second. */
2998 if (swap_commutative_operands_p (op0, op1))
3000 tem = op0, op0 = op1, op1 = tem;
3001 code = swap_condition (code);
3004 trueop0 = avoid_constant_pool_reference (op0);
3005 trueop1 = avoid_constant_pool_reference (op1);
3007 /* For integer comparisons of A and B maybe we can simplify A - B and can
3008 then simplify a comparison of that with zero. If A and B are both either
3009 a register or a CONST_INT, this can't help; testing for these cases will
3010 prevent infinite recursion here and speed things up.
3012 If CODE is an unsigned comparison, then we can never do this optimization,
3013 because it gives an incorrect result if the subtraction wraps around zero.
3014 ANSI C defines unsigned operations such that they never overflow, and
3015 thus such cases cannot be ignored; but we cannot do it even for
3016 signed comparisons for languages such as Java, so test flag_wrapv. */
3018 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3019 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3020 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3021 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3022 /* We cannot do this for == or != if tem is a nonzero address. */
3023 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3024 && code != GTU && code != GEU && code != LTU && code != LEU)
3025 return simplify_const_relational_operation (signed_condition (code),
3026 mode, tem, const0_rtx);
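/* E.g., the signed comparison (lt (plus x (const_int 1)) x) can first
   simplify (minus (plus x 1) x) to (const_int 1) and then fold
   (lt (const_int 1) (const_int 0)) to const0_rtx; this is only valid
   because signed overflow is assumed not to wrap (!flag_wrapv).  */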
3028 if (flag_unsafe_math_optimizations && code == ORDERED)
3029 return const_true_rtx;
3031 if (flag_unsafe_math_optimizations && code == UNORDERED)
3032 return const0_rtx;
3034 /* For modes without NaNs, if the two operands are equal, we know the
3035 result except if they have side-effects. */
3036 if (! HONOR_NANS (GET_MODE (trueop0))
3037 && rtx_equal_p (trueop0, trueop1)
3038 && ! side_effects_p (trueop0))
3039 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3041 /* If the operands are floating-point constants, see if we can fold
3042 the result. */
3043 else if (GET_CODE (trueop0) == CONST_DOUBLE
3044 && GET_CODE (trueop1) == CONST_DOUBLE
3045 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
3047 REAL_VALUE_TYPE d0, d1;
3049 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3050 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3052 /* Comparisons are unordered iff at least one of the values is NaN. */
3053 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3054 switch (code)
3056 case UNEQ:
3057 case UNLT:
3058 case UNGT:
3059 case UNLE:
3060 case UNGE:
3061 case NE:
3062 case UNORDERED:
3063 return const_true_rtx;
3064 case EQ:
3065 case LT:
3066 case GT:
3067 case LE:
3068 case GE:
3069 case LTGT:
3070 case ORDERED:
3071 return const0_rtx;
3072 default:
3073 return 0;
3076 equal = REAL_VALUES_EQUAL (d0, d1);
3077 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3078 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3081 /* Otherwise, see if the operands are both integers. */
3082 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3083 && (GET_CODE (trueop0) == CONST_DOUBLE
3084 || GET_CODE (trueop0) == CONST_INT)
3085 && (GET_CODE (trueop1) == CONST_DOUBLE
3086 || GET_CODE (trueop1) == CONST_INT))
3088 int width = GET_MODE_BITSIZE (mode);
3089 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3090 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3092 /* Get the two words comprising each integer constant. */
3093 if (GET_CODE (trueop0) == CONST_DOUBLE)
3095 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3096 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3098 else
3100 l0u = l0s = INTVAL (trueop0);
3101 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3104 if (GET_CODE (trueop1) == CONST_DOUBLE)
3106 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3107 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3109 else
3111 l1u = l1s = INTVAL (trueop1);
3112 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3115 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3116 we have to sign or zero-extend the values. */
3117 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3119 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3120 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3122 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3123 l0s |= ((HOST_WIDE_INT) (-1) << width);
3125 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3126 l1s |= ((HOST_WIDE_INT) (-1) << width);
3128 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3129 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3131 equal = (h0u == h1u && l0u == l1u);
3132 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3133 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3134 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3135 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3138 /* Otherwise, there are some code-specific tests we can make. */
3139 else
3141 /* Optimize comparisons with upper and lower bounds. */
3142 if (SCALAR_INT_MODE_P (mode)
3143 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3145 rtx mmin, mmax;
3146 int sign;
3148 if (code == GEU
3149 || code == LEU
3150 || code == GTU
3151 || code == LTU)
3152 sign = 0;
3153 else
3154 sign = 1;
3156 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3158 tem = NULL_RTX;
3159 switch (code)
3161 case GEU:
3162 case GE:
3163 /* x >= min is always true. */
3164 if (rtx_equal_p (trueop1, mmin))
3165 tem = const_true_rtx;
3166 else
3167 break;
3169 case LEU:
3170 case LE:
3171 /* x <= max is always true. */
3172 if (rtx_equal_p (trueop1, mmax))
3173 tem = const_true_rtx;
3174 break;
3176 case GTU:
3177 case GT:
3178 /* x > max is always false. */
3179 if (rtx_equal_p (trueop1, mmax))
3180 tem = const0_rtx;
3181 break;
3183 case LTU:
3184 case LT:
3185 /* x < min is always false. */
3186 if (rtx_equal_p (trueop1, mmin))
3187 tem = const0_rtx;
3188 break;
3190 default:
3191 break;
3193 if (tem == const0_rtx
3194 || tem == const_true_rtx)
3195 return tem;
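/* E.g., for an unsigned QImode comparison get_mode_bounds returns
   mmin = 0 and mmax = 255, so (geu x (const_int 0)) folds to
   const_true_rtx and (gtu x (const_int 255)) folds to const0_rtx.  */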
3198 switch (code)
3200 case EQ:
3201 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3202 return const0_rtx;
3203 break;
3205 case NE:
3206 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3207 return const_true_rtx;
3208 break;
3210 case LT:
3211 /* Optimize abs(x) < 0.0. */
3212 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3214 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3215 : trueop0;
3216 if (GET_CODE (tem) == ABS)
3217 return const0_rtx;
3219 break;
3221 case GE:
3222 /* Optimize abs(x) >= 0.0. */
3223 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3225 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3226 : trueop0;
3227 if (GET_CODE (tem) == ABS)
3228 return const_true_rtx;
3230 break;
3232 case UNGE:
3233 /* Optimize ! (abs(x) < 0.0). */
3234 if (trueop1 == CONST0_RTX (mode))
3236 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3237 : trueop0;
3238 if (GET_CODE (tem) == ABS)
3239 return const_true_rtx;
3241 break;
3243 default:
3244 break;
3247 return 0;
3250 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3251 as appropriate. */
3252 switch (code)
3254 case EQ:
3255 case UNEQ:
3256 return equal ? const_true_rtx : const0_rtx;
3257 case NE:
3258 case LTGT:
3259 return ! equal ? const_true_rtx : const0_rtx;
3260 case LT:
3261 case UNLT:
3262 return op0lt ? const_true_rtx : const0_rtx;
3263 case GT:
3264 case UNGT:
3265 return op1lt ? const_true_rtx : const0_rtx;
3266 case LTU:
3267 return op0ltu ? const_true_rtx : const0_rtx;
3268 case GTU:
3269 return op1ltu ? const_true_rtx : const0_rtx;
3270 case LE:
3271 case UNLE:
3272 return equal || op0lt ? const_true_rtx : const0_rtx;
3273 case GE:
3274 case UNGE:
3275 return equal || op1lt ? const_true_rtx : const0_rtx;
3276 case LEU:
3277 return equal || op0ltu ? const_true_rtx : const0_rtx;
3278 case GEU:
3279 return equal || op1ltu ? const_true_rtx : const0_rtx;
3280 case ORDERED:
3281 return const_true_rtx;
3282 case UNORDERED:
3283 return const0_rtx;
3284 default:
3285 gcc_unreachable ();
3289 /* Simplify CODE, an operation with result mode MODE and three operands,
3290 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3291 a constant. Return 0 if no simplification is possible. */
3293 rtx
3294 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3295 enum machine_mode op0_mode, rtx op0, rtx op1,
3296 rtx op2)
3298 unsigned int width = GET_MODE_BITSIZE (mode);
3300 /* VOIDmode means "infinite" precision. */
3301 if (width == 0)
3302 width = HOST_BITS_PER_WIDE_INT;
3304 switch (code)
3306 case SIGN_EXTRACT:
3307 case ZERO_EXTRACT:
3308 if (GET_CODE (op0) == CONST_INT
3309 && GET_CODE (op1) == CONST_INT
3310 && GET_CODE (op2) == CONST_INT
3311 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3312 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3314 /* Extracting a bit-field from a constant */
3315 HOST_WIDE_INT val = INTVAL (op0);
3317 if (BITS_BIG_ENDIAN)
3318 val >>= (GET_MODE_BITSIZE (op0_mode)
3319 - INTVAL (op2) - INTVAL (op1));
3320 else
3321 val >>= INTVAL (op2);
3323 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3325 /* First zero-extend. */
3326 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3327 /* If desired, propagate sign bit. */
3328 if (code == SIGN_EXTRACT
3329 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3330 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3333 /* Clear the bits that don't belong in our mode,
3334 unless they and our sign bit are all one.
3335 So we get either a reasonable negative value or a reasonable
3336 unsigned value for this mode. */
3337 if (width < HOST_BITS_PER_WIDE_INT
3338 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3339 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3340 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3342 return gen_int_mode (val, mode);
3344 break;
3346 case IF_THEN_ELSE:
3347 if (GET_CODE (op0) == CONST_INT)
3348 return op0 != const0_rtx ? op1 : op2;
3350 /* Convert c ? a : a into "a". */
3351 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3352 return op1;
3354 /* Convert a != b ? a : b into "a". */
3355 if (GET_CODE (op0) == NE
3356 && ! side_effects_p (op0)
3357 && ! HONOR_NANS (mode)
3358 && ! HONOR_SIGNED_ZEROS (mode)
3359 && ((rtx_equal_p (XEXP (op0, 0), op1)
3360 && rtx_equal_p (XEXP (op0, 1), op2))
3361 || (rtx_equal_p (XEXP (op0, 0), op2)
3362 && rtx_equal_p (XEXP (op0, 1), op1))))
3363 return op1;
3365 /* Convert a == b ? a : b into "b". */
3366 if (GET_CODE (op0) == EQ
3367 && ! side_effects_p (op0)
3368 && ! HONOR_NANS (mode)
3369 && ! HONOR_SIGNED_ZEROS (mode)
3370 && ((rtx_equal_p (XEXP (op0, 0), op1)
3371 && rtx_equal_p (XEXP (op0, 1), op2))
3372 || (rtx_equal_p (XEXP (op0, 0), op2)
3373 && rtx_equal_p (XEXP (op0, 1), op1))))
3374 return op2;
3376 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3378 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3379 ? GET_MODE (XEXP (op0, 1))
3380 : GET_MODE (XEXP (op0, 0)));
3381 rtx temp;
3383 /* Look for happy constants in op1 and op2. */
3384 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3386 HOST_WIDE_INT t = INTVAL (op1);
3387 HOST_WIDE_INT f = INTVAL (op2);
3389 if (t == STORE_FLAG_VALUE && f == 0)
3390 code = GET_CODE (op0);
3391 else if (t == 0 && f == STORE_FLAG_VALUE)
3393 enum rtx_code tmp;
3394 tmp = reversed_comparison_code (op0, NULL_RTX);
3395 if (tmp == UNKNOWN)
3396 break;
3397 code = tmp;
3399 else
3400 break;
3402 return simplify_gen_relational (code, mode, cmp_mode,
3403 XEXP (op0, 0), XEXP (op0, 1));
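/* E.g., with STORE_FLAG_VALUE == 1, the constant arms 1 and 0 turn
   (if_then_else (lt x y) (const_int 1) (const_int 0)) directly into
   the comparison (lt x y); with the arms swapped, the reversed
   comparison (ge x y) is generated instead, when it is known.  */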
3406 if (cmp_mode == VOIDmode)
3407 cmp_mode = op0_mode;
3408 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3409 cmp_mode, XEXP (op0, 0),
3410 XEXP (op0, 1));
3412 /* See if any simplifications were possible. */
3413 if (temp)
3415 if (GET_CODE (temp) == CONST_INT)
3416 return temp == const0_rtx ? op2 : op1;
3417 else if (temp)
3418 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3421 break;
3423 case VEC_MERGE:
3424 gcc_assert (GET_MODE (op0) == mode);
3425 gcc_assert (GET_MODE (op1) == mode);
3426 gcc_assert (VECTOR_MODE_P (mode));
3427 op2 = avoid_constant_pool_reference (op2);
3428 if (GET_CODE (op2) == CONST_INT)
3430 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3431 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3432 int mask = (1 << n_elts) - 1;
3434 if (!(INTVAL (op2) & mask))
3435 return op1;
3436 if ((INTVAL (op2) & mask) == mask)
3437 return op0;
3439 op0 = avoid_constant_pool_reference (op0);
3440 op1 = avoid_constant_pool_reference (op1);
3441 if (GET_CODE (op0) == CONST_VECTOR
3442 && GET_CODE (op1) == CONST_VECTOR)
3444 rtvec v = rtvec_alloc (n_elts);
3445 unsigned int i;
3447 for (i = 0; i < n_elts; i++)
3448 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3449 ? CONST_VECTOR_ELT (op0, i)
3450 : CONST_VECTOR_ELT (op1, i));
3451 return gen_rtx_CONST_VECTOR (mode, v);
3454 break;
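/* E.g., (vec_merge:V4SI a b (const_int 5)) takes elements 0 and 2
   from A (mask bits 0101) and elements 1 and 3 from B; a mask of
   zero or of all ones short-circuits to B or A respectively.  */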
3456 default:
3457 gcc_unreachable ();
3460 return 0;
3463 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3464 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3466 Works by unpacking OP into a collection of 8-bit values
3467 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3468 and then repacking them again for OUTERMODE. */
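/* A little-endian example: (subreg:HI (const_int 0x12345678) 0) taken
   from SImode unpacks into the byte array {0x78, 0x56, 0x34, 0x12},
   selects the two bytes at offset 0, and repacks them into the HImode
   constant 0x5678.  */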
3470 static rtx
3471 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3472 enum machine_mode innermode, unsigned int byte)
3474 /* We support up to 512-bit values (for V8DFmode). */
3475 enum {
3476 max_bitsize = 512,
3477 value_bit = 8,
3478 value_mask = (1 << value_bit) - 1
3480 unsigned char value[max_bitsize / value_bit];
3481 int value_start;
3482 int i;
3483 int elem;
3485 int num_elem;
3486 rtx * elems;
3487 int elem_bitsize;
3488 rtx result_s;
3489 rtvec result_v = NULL;
3490 enum mode_class outer_class;
3491 enum machine_mode outer_submode;
3493 /* Some ports misuse CCmode. */
3494 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3495 return op;
3497 /* We have no way to represent a complex constant at the rtl level. */
3498 if (COMPLEX_MODE_P (outermode))
3499 return NULL_RTX;
3501 /* Unpack the value. */
3503 if (GET_CODE (op) == CONST_VECTOR)
3505 num_elem = CONST_VECTOR_NUNITS (op);
3506 elems = &CONST_VECTOR_ELT (op, 0);
3507 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3509 else
3511 num_elem = 1;
3512 elems = &op;
3513 elem_bitsize = max_bitsize;
3515 /* If this asserts, it is too complicated; reducing value_bit may help. */
3516 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3517 /* I don't know how to handle endianness of sub-units. */
3518 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3520 for (elem = 0; elem < num_elem; elem++)
3522 unsigned char * vp;
3523 rtx el = elems[elem];
3525 /* Vectors are kept in target memory order. (This is probably
3526 a mistake.) */
3528 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3529 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3530 / BITS_PER_UNIT);
3531 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3532 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3533 unsigned bytele = (subword_byte % UNITS_PER_WORD
3534 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3535 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3538 switch (GET_CODE (el))
3540 case CONST_INT:
3541 for (i = 0;
3542 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3543 i += value_bit)
3544 *vp++ = INTVAL (el) >> i;
3545 /* CONST_INTs are always logically sign-extended. */
3546 for (; i < elem_bitsize; i += value_bit)
3547 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3548 break;
3550 case CONST_DOUBLE:
3551 if (GET_MODE (el) == VOIDmode)
3553 /* If this triggers, someone should have generated a
3554 CONST_INT instead. */
3555 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3557 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3558 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3559 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3561 *vp++
3562 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3563 i += value_bit;
3565 /* It shouldn't matter what's done here, so fill it with
3566 zero. */
3567 for (; i < max_bitsize; i += value_bit)
3568 *vp++ = 0;
3570 else
3572 long tmp[max_bitsize / 32];
3573 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3575 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3576 gcc_assert (bitsize <= elem_bitsize);
3577 gcc_assert (bitsize % value_bit == 0);
3579 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3580 GET_MODE (el));
3582 /* real_to_target produces its result in words affected by
3583 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3584 and use WORDS_BIG_ENDIAN instead; see the documentation
3585 of SUBREG in rtl.texi. */
3586 for (i = 0; i < bitsize; i += value_bit)
3588 int ibase;
3589 if (WORDS_BIG_ENDIAN)
3590 ibase = bitsize - 1 - i;
3591 else
3592 ibase = i;
3593 *vp++ = tmp[ibase / 32] >> i % 32;
3596 /* It shouldn't matter what's done here, so fill it with
3597 zero. */
3598 for (; i < elem_bitsize; i += value_bit)
3599 *vp++ = 0;
3601 break;
3603 default:
3604 gcc_unreachable ();
3608 /* Now, pick the right byte to start with. */
3609 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3610 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3611 will already have offset 0. */
3612 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3614 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3615 - byte);
3616 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3617 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3618 byte = (subword_byte % UNITS_PER_WORD
3619 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3622 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3623 so if it's become negative it will instead be very large.) */
3624 gcc_assert (byte < GET_MODE_SIZE (innermode));
3626 /* Convert from bytes to chunks of size value_bit. */
3627 value_start = byte * (BITS_PER_UNIT / value_bit);
3629 /* Re-pack the value. */
3631 if (VECTOR_MODE_P (outermode))
3633 num_elem = GET_MODE_NUNITS (outermode);
3634 result_v = rtvec_alloc (num_elem);
3635 elems = &RTVEC_ELT (result_v, 0);
3636 outer_submode = GET_MODE_INNER (outermode);
3638 else
3640 num_elem = 1;
3641 elems = &result_s;
3642 outer_submode = outermode;
3645 outer_class = GET_MODE_CLASS (outer_submode);
3646 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3648 gcc_assert (elem_bitsize % value_bit == 0);
3649 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3651 for (elem = 0; elem < num_elem; elem++)
3653 unsigned char *vp;
3655 /* Vectors are stored in target memory order. (This is probably
3656 a mistake.) */
3658 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3659 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3660 / BITS_PER_UNIT);
3661 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3662 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3663 unsigned bytele = (subword_byte % UNITS_PER_WORD
3664 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3665 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3668 switch (outer_class)
3670 case MODE_INT:
3671 case MODE_PARTIAL_INT:
3673 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3675 for (i = 0;
3676 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3677 i += value_bit)
3678 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3679 for (; i < elem_bitsize; i += value_bit)
3680 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3681 << (i - HOST_BITS_PER_WIDE_INT));
3683 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3684 know why. */
3685 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3686 elems[elem] = gen_int_mode (lo, outer_submode);
3687 else
3688 elems[elem] = immed_double_const (lo, hi, outer_submode);
3690 break;
3692 case MODE_FLOAT:
3694 REAL_VALUE_TYPE r;
3695 long tmp[max_bitsize / 32];
3697 /* real_from_target wants its input in words affected by
3698 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3699 and use WORDS_BIG_ENDIAN instead; see the documentation
3700 of SUBREG in rtl.texi. */
3701 for (i = 0; i < max_bitsize / 32; i++)
3702 tmp[i] = 0;
3703 for (i = 0; i < elem_bitsize; i += value_bit)
3705 int ibase;
3706 if (WORDS_BIG_ENDIAN)
3707 ibase = elem_bitsize - 1 - i;
3708 else
3709 ibase = i;
3710 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3713 real_from_target (&r, tmp, outer_submode);
3714 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3716 break;
3718 default:
3719 gcc_unreachable ();
3722 if (VECTOR_MODE_P (outermode))
3723 return gen_rtx_CONST_VECTOR (outermode, result_v);
3724 else
3725 return result_s;
3728 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3729 Return 0 if no simplifications are possible. */
3730 rtx
3731 simplify_subreg (enum machine_mode outermode, rtx op,
3732 enum machine_mode innermode, unsigned int byte)
3734 /* Little bit of sanity checking. */
3735 gcc_assert (innermode != VOIDmode);
3736 gcc_assert (outermode != VOIDmode);
3737 gcc_assert (innermode != BLKmode);
3738 gcc_assert (outermode != BLKmode);
3740 gcc_assert (GET_MODE (op) == innermode
3741 || GET_MODE (op) == VOIDmode);
3743 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3744 gcc_assert (byte < GET_MODE_SIZE (innermode));
3746 if (outermode == innermode && !byte)
3747 return op;
3749 if (GET_CODE (op) == CONST_INT
3750 || GET_CODE (op) == CONST_DOUBLE
3751 || GET_CODE (op) == CONST_VECTOR)
3752 return simplify_immed_subreg (outermode, op, innermode, byte);
3754 /* Changing mode twice with SUBREG => just change it once,
3755 or not at all if changing back to op's starting mode. */
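/* E.g., on a little-endian target, (subreg:QI (subreg:HI (reg:SI r) 0) 0)
   collapses to the single (subreg:QI (reg:SI r) 0), and a subreg that
   merely changes the mode back to the innermost one folds away entirely.  */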
3756 if (GET_CODE (op) == SUBREG)
3758 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3759 int final_offset = byte + SUBREG_BYTE (op);
3760 rtx newx;
3762 if (outermode == innermostmode
3763 && byte == 0 && SUBREG_BYTE (op) == 0)
3764 return SUBREG_REG (op);
3766 /* The SUBREG_BYTE represents the offset, as if the value were stored
3767 in memory. The irritating exception is a paradoxical subreg, where
3768 we define SUBREG_BYTE to be 0; on big-endian machines, this
3769 value should be negative. For a moment, undo this exception. */
3770 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3772 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3773 if (WORDS_BIG_ENDIAN)
3774 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3775 if (BYTES_BIG_ENDIAN)
3776 final_offset += difference % UNITS_PER_WORD;
3778 if (SUBREG_BYTE (op) == 0
3779 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3781 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3782 if (WORDS_BIG_ENDIAN)
3783 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3784 if (BYTES_BIG_ENDIAN)
3785 final_offset += difference % UNITS_PER_WORD;
3788 /* See whether resulting subreg will be paradoxical. */
3789 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3791 /* In nonparadoxical subregs we can't handle negative offsets. */
3792 if (final_offset < 0)
3793 return NULL_RTX;
3794 /* Bail out in case resulting subreg would be incorrect. */
3795 if (final_offset % GET_MODE_SIZE (outermode)
3796 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3797 return NULL_RTX;
3799 else
3801 int offset = 0;
3802 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3804 /* In a paradoxical subreg, see if we are still looking at the lower
3805 part. If so, our SUBREG_BYTE will be 0. */
3806 if (WORDS_BIG_ENDIAN)
3807 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3808 if (BYTES_BIG_ENDIAN)
3809 offset += difference % UNITS_PER_WORD;
3810 if (offset == final_offset)
3811 final_offset = 0;
3812 else
3813 return NULL_RTX;
3816 /* Recurse for further possible simplifications. */
3817 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3818 final_offset);
3819 if (newx)
3820 return newx;
3821 if (validate_subreg (outermode, innermostmode,
3822 SUBREG_REG (op), final_offset))
3823 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3824 return NULL_RTX;
3827 /* SUBREG of a hard register => just change the register number
3828 and/or mode. If the hard register is not valid in that mode,
3829 suppress this simplification. If the hard register is the stack,
3830 frame, or argument pointer, leave this as a SUBREG. */
3832 if (REG_P (op)
3833 && REGNO (op) < FIRST_PSEUDO_REGISTER
3834 #ifdef CANNOT_CHANGE_MODE_CLASS
3835 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3836 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3837 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3838 #endif
3839 && ((reload_completed && !frame_pointer_needed)
3840 || (REGNO (op) != FRAME_POINTER_REGNUM
3841 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3842 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3843 #endif
3845 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3846 && REGNO (op) != ARG_POINTER_REGNUM
3847 #endif
3848 && REGNO (op) != STACK_POINTER_REGNUM
3849 && subreg_offset_representable_p (REGNO (op), innermode,
3850 byte, outermode))
3852 unsigned int regno = REGNO (op);
3853 unsigned int final_regno
3854 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3856 /* ??? We do allow it if the current REG is not valid for
3857 its mode. This is a kludge to work around how float/complex
3858 arguments are passed on 32-bit SPARC and should be fixed. */
3859 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3860 || ! HARD_REGNO_MODE_OK (regno, innermode))
3862 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3864 /* Propagate original regno. We don't have any way to specify
3865 the offset inside the original regno, so do so only for the lowpart.
3866 The information is used only by alias analysis, which cannot
3867 grok partial registers anyway. */
3869 if (subreg_lowpart_offset (outermode, innermode) == byte)
3870 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3871 return x;
3875 /* If we have a SUBREG of a register that we are replacing and we are
3876 replacing it with a MEM, make a new MEM and try replacing the
3877 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3878 or if we would be widening it. */
3880 if (MEM_P (op)
3881 && ! mode_dependent_address_p (XEXP (op, 0))
3882 /* Allow splitting of volatile memory references in case we don't
3883 have an instruction to move the whole thing. */
3884 && (! MEM_VOLATILE_P (op)
3885 || ! have_insn_for (SET, innermode))
3886 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3887 return adjust_address_nv (op, outermode, byte);
3889 /* Handle complex values represented as CONCAT
3890 of real and imaginary part. */
3891 if (GET_CODE (op) == CONCAT)
3893 unsigned int inner_size, final_offset;
3894 rtx part, res;
3896 inner_size = GET_MODE_UNIT_SIZE (innermode);
3897 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3898 final_offset = byte % inner_size;
3899 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3900 return NULL_RTX;
3902 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3903 if (res)
3904 return res;
3905 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3906 return gen_rtx_SUBREG (outermode, part, final_offset);
3907 return NULL_RTX;
3910 /* Optimize SUBREG truncations of zero- and sign-extended values. */
3911 if ((GET_CODE (op) == ZERO_EXTEND
3912 || GET_CODE (op) == SIGN_EXTEND)
3913 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3915 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3917 /* If we're requesting the lowpart of a zero or sign extension,
3918 there are three possibilities. If the outermode is the same
3919 as the origmode, we can omit both the extension and the subreg.
3920 If the outermode is not larger than the origmode, we can apply
3921 the truncation without the extension. Finally, if the outermode
3922 is larger than the origmode, but both are integer modes, we
3923 can just extend to the appropriate mode. */
3924 if (bitpos == 0)
3926 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3927 if (outermode == origmode)
3928 return XEXP (op, 0);
3929 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3930 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3931 subreg_lowpart_offset (outermode,
3932 origmode));
3933 if (SCALAR_INT_MODE_P (outermode))
3934 return simplify_gen_unary (GET_CODE (op), outermode,
3935 XEXP (op, 0), origmode);
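/* E.g., on a little-endian target, (subreg:QI (zero_extend:SI (reg:QI r)) 0)
   is just (reg:QI r), while (subreg:HI (zero_extend:SI (reg:QI r)) 0)
   becomes (zero_extend:HI (reg:QI r)), extending directly to the
   narrower mode.  */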
3938 /* A SUBREG resulting from a zero extension may fold to zero if
3939 it extracts higher bits than the ZERO_EXTEND's source provides. */
3940 if (GET_CODE (op) == ZERO_EXTEND
3941 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3942 return CONST0_RTX (outermode);
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C) 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
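  /* ASHIFTRT is the correct replacement here: after the sign extension,
     every bit of the SImode value above X's sign bit is a copy of it,
     so shifting right by C brings sign-bit copies into the top of the
     low byte, exactly what (ashiftrt:QI (x:QI) C) computes.  For
     example:

         (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI X))
                                 (const_int 3)) 0)
           -->  (ashiftrt:QI (reg:QI X) (const_int 3))  */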
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C) 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
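  /* Here the zero extension guarantees that only zeros are shifted into
     the top of the low byte, which is also what (lshiftrt:QI (x:QI) C)
     produces, so the stricter width test of the previous case is not
     needed.  For example:

         (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI X))
                                 (const_int 3)) 0)
           -->  (lshiftrt:QI (reg:QI X) (const_int 3))  */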
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C) 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
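  /* For a left shift the low bits of the result depend only on the low
     bits of the shifted value, so either flavor of extension works:

         (subreg:QI (ashift:SI (sign_extend:SI (reg:QI X))
                               (const_int 3)) 0)
           -->  (ashift:QI (reg:QI X) (const_int 3))  */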
  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
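/* A usage sketch (the caller and variable names here are hypothetical):
   a pass wanting the low SImode word of a DImode value X would write

       rtx low = simplify_gen_subreg (SImode, x, DImode, 0);

   and get back either a simplified rtx, a fresh SUBREG, or NULL_RTX if
   no valid subreg can be formed.  Byte offset 0 selects the low word
   only on a little-endian target.  */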
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
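          /* HIGH yields the high-order bits of FOO's address and LO_SUM
             adds back the remaining low-order bits, so together the pair
             computes FOO itself (the usual RISC address split).  */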
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;