1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
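/* For example, assuming a 64-bit HOST_WIDE_INT,
   HWI_SIGN_EXTEND (0x8000000000000000) evaluates to (HOST_WIDE_INT) -1
   and HWI_SIGN_EXTEND (1) evaluates to 0, i.e. the high word that makes
   the (low, high) pair read as the sign-extended value of LOW.  */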
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, rtx i)
69 return gen_int_mode (- INTVAL (i), mode);
72 /* Test whether expression X is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (enum machine_mode mode, rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_BITSIZE (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && GET_CODE (x) == CONST_INT)
90 val = INTVAL (x);
91 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_DOUBLE
93 && CONST_DOUBLE_LOW (x) == 0)
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
98 else
99 return false;
101 if (width < HOST_BITS_PER_WIDE_INT)
102 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
103 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
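/* For example, in SImode this accepts only a CONST_INT whose low 32 bits
   are 0x80000000, i.e. exactly the most significant bit of the mode;
   every other constant yields false.  */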
106 /* Make a binary operation by properly ordering the operands and
107 seeing if the expression folds. */
109 rtx
110 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
111 rtx op1)
113 rtx tem;
115 /* Put complex operands first and constants second if commutative. */
116 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
117 && swap_commutative_operands_p (op0, op1))
118 tem = op0, op0 = op1, op1 = tem;
120 /* If this simplifies, do it. */
121 tem = simplify_binary_operation (code, mode, op0, op1);
122 if (tem)
123 return tem;
125 /* Handle addition and subtraction specially. Otherwise, just form
126 the operation. */
128 if (code == PLUS || code == MINUS)
130 tem = simplify_plus_minus (code, mode, op0, op1, 1);
131 if (tem)
132 return tem;
135 return gen_rtx_fmt_ee (code, mode, op0, op1);
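/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds
   to X, while operands that cannot be folded simply come back as a newly
   built (code mode op0 op1) rtx with a constant operand of a commutative
   operation placed second.  */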
138 /* If X is a MEM referencing the constant pool, return the real value.
139 Otherwise return X. */
140 rtx
141 avoid_constant_pool_reference (rtx x)
143 rtx c, tmp, addr;
144 enum machine_mode cmode;
146 switch (GET_CODE (x))
148 case MEM:
149 break;
151 case FLOAT_EXTEND:
152 /* Handle float extensions of constant pool references. */
153 tmp = XEXP (x, 0);
154 c = avoid_constant_pool_reference (tmp);
155 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
157 REAL_VALUE_TYPE d;
159 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
160 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
162 return x;
164 default:
165 return x;
168 addr = XEXP (x, 0);
170 /* Call target hook to avoid the effects of -fpic etc.... */
171 addr = targetm.delegitimize_address (addr);
173 if (GET_CODE (addr) == LO_SUM)
174 addr = XEXP (addr, 1);
176 if (GET_CODE (addr) != SYMBOL_REF
177 || ! CONSTANT_POOL_ADDRESS_P (addr))
178 return x;
180 c = get_pool_constant (addr);
181 cmode = get_pool_mode (addr);
183 /* If we're accessing the constant in a different mode than it was
184 originally stored, attempt to fix that up via subreg simplifications.
185 If that fails we have no choice but to return the original memory. */
186 if (cmode != GET_MODE (x))
188 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
189 return c ? c : x;
192 return c;
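/* For example, a MEM whose address is a constant-pool SYMBOL_REF for a
   DFmode constant comes back as the CONST_DOUBLE itself, converted via
   simplify_subreg if the access mode differs; anything that is not such
   a MEM is returned unchanged.  */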
195 /* Make a unary operation by first seeing if it folds and otherwise making
196 the specified operation. */
198 rtx
199 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
200 enum machine_mode op_mode)
202 rtx tem;
204 /* If this simplifies, use it. */
205 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
206 return tem;
208 return gen_rtx_fmt_e (code, mode, op);
211 /* Likewise for ternary operations. */
213 rtx
214 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
215 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
217 rtx tem;
219 /* If this simplifies, use it. */
220 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
221 op0, op1, op2)))
222 return tem;
224 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
227 /* Likewise, for relational operations.
228 CMP_MODE specifies mode comparison is done in. */
230 rtx
231 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
232 enum machine_mode cmp_mode, rtx op0, rtx op1)
234 rtx tem;
236 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
237 op0, op1)))
238 return tem;
240 return gen_rtx_fmt_ee (code, mode, op0, op1);
243 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
244 resulting RTX. Return a new RTX which is as simplified as possible. */
246 rtx
247 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
249 enum rtx_code code = GET_CODE (x);
250 enum machine_mode mode = GET_MODE (x);
251 enum machine_mode op_mode;
252 rtx op0, op1, op2;
254 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
255 to build a new expression substituting recursively. If we can't do
256 anything, return our input. */
258 if (x == old_rtx)
259 return new_rtx;
261 switch (GET_RTX_CLASS (code))
263 case RTX_UNARY:
264 op0 = XEXP (x, 0);
265 op_mode = GET_MODE (op0);
266 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
267 if (op0 == XEXP (x, 0))
268 return x;
269 return simplify_gen_unary (code, mode, op0, op_mode);
271 case RTX_BIN_ARITH:
272 case RTX_COMM_ARITH:
273 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
274 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
275 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
276 return x;
277 return simplify_gen_binary (code, mode, op0, op1);
279 case RTX_COMPARE:
280 case RTX_COMM_COMPARE:
281 op0 = XEXP (x, 0);
282 op1 = XEXP (x, 1);
283 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
284 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
285 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return x;
288 return simplify_gen_relational (code, mode, op_mode, op0, op1);
290 case RTX_TERNARY:
291 case RTX_BITFIELD_OPS:
292 op0 = XEXP (x, 0);
293 op_mode = GET_MODE (op0);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
296 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
297 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
298 return x;
299 if (op_mode == VOIDmode)
300 op_mode = GET_MODE (op0);
301 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
303 case RTX_EXTRA:
304 /* The only case we try to handle is a SUBREG. */
305 if (code == SUBREG)
307 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
308 if (op0 == SUBREG_REG (x))
309 return x;
310 op0 = simplify_gen_subreg (GET_MODE (x), op0,
311 GET_MODE (SUBREG_REG (x)),
312 SUBREG_BYTE (x));
313 return op0 ? op0 : x;
315 break;
317 case RTX_OBJ:
318 if (code == MEM)
320 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
321 if (op0 == XEXP (x, 0))
322 return x;
323 return replace_equiv_address_nv (x, op0);
325 else if (code == LO_SUM)
327 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
328 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
330 /* (lo_sum (high x) x) -> x */
331 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
332 return op1;
334 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
335 return x;
336 return gen_rtx_LO_SUM (mode, op0, op1);
338 else if (code == REG)
340 if (rtx_equal_p (x, old_rtx))
341 return new_rtx;
343 break;
345 default:
346 break;
348 return x;
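/* For example, replacing (reg R) with (const_int 4) inside
   (plus:SI (reg R) (const_int 8)) rebuilds the PLUS through
   simplify_gen_binary and therefore folds to (const_int 12).  */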
351 /* Try to simplify a unary operation CODE whose output mode is to be
352 MODE with input operand OP whose mode was originally OP_MODE.
353 Return zero if no simplification can be made. */
354 rtx
355 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
356 rtx op, enum machine_mode op_mode)
358 unsigned int width = GET_MODE_BITSIZE (mode);
359 rtx trueop = avoid_constant_pool_reference (op);
361 if (code == VEC_DUPLICATE)
363 gcc_assert (VECTOR_MODE_P (mode));
364 if (GET_MODE (trueop) != VOIDmode)
366 if (!VECTOR_MODE_P (GET_MODE (trueop)))
367 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
368 else
369 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
370 (GET_MODE (trueop)));
372 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
373 || GET_CODE (trueop) == CONST_VECTOR)
375 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
376 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
377 rtvec v = rtvec_alloc (n_elts);
378 unsigned int i;
380 if (GET_CODE (trueop) != CONST_VECTOR)
381 for (i = 0; i < n_elts; i++)
382 RTVEC_ELT (v, i) = trueop;
383 else
385 enum machine_mode inmode = GET_MODE (trueop);
386 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
387 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
389 gcc_assert (in_n_elts < n_elts);
390 gcc_assert ((n_elts % in_n_elts) == 0);
391 for (i = 0; i < n_elts; i++)
392 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
394 return gen_rtx_CONST_VECTOR (mode, v);
397 else if (GET_CODE (op) == CONST)
398 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
400 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
402 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
403 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
404 enum machine_mode opmode = GET_MODE (trueop);
405 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
406 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
407 rtvec v = rtvec_alloc (n_elts);
408 unsigned int i;
410 gcc_assert (op_n_elts == n_elts);
411 for (i = 0; i < n_elts; i++)
413 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
414 CONST_VECTOR_ELT (trueop, i),
415 GET_MODE_INNER (opmode));
416 if (!x)
417 return 0;
418 RTVEC_ELT (v, i) = x;
420 return gen_rtx_CONST_VECTOR (mode, v);
423 /* The order of these tests is critical so that, for example, we don't
424 check the wrong mode (input vs. output) for a conversion operation,
425 such as FIX. At some point, this should be simplified. */
427 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
428 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
430 HOST_WIDE_INT hv, lv;
431 REAL_VALUE_TYPE d;
433 if (GET_CODE (trueop) == CONST_INT)
434 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
435 else
436 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
438 REAL_VALUE_FROM_INT (d, lv, hv, mode);
439 d = real_value_truncate (mode, d);
440 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
442 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
443 && (GET_CODE (trueop) == CONST_DOUBLE
444 || GET_CODE (trueop) == CONST_INT))
446 HOST_WIDE_INT hv, lv;
447 REAL_VALUE_TYPE d;
449 if (GET_CODE (trueop) == CONST_INT)
450 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
451 else
452 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
454 if (op_mode == VOIDmode)
456 /* We don't know how to interpret negative-looking numbers in
457 this case, so don't try to fold those. */
458 if (hv < 0)
459 return 0;
461 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
462 ;
463 else
464 hv = 0, lv &= GET_MODE_MASK (op_mode);
466 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
467 d = real_value_truncate (mode, d);
468 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
471 if (GET_CODE (trueop) == CONST_INT
472 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
474 HOST_WIDE_INT arg0 = INTVAL (trueop);
475 HOST_WIDE_INT val;
477 switch (code)
479 case NOT:
480 val = ~ arg0;
481 break;
483 case NEG:
484 val = - arg0;
485 break;
487 case ABS:
488 val = (arg0 >= 0 ? arg0 : - arg0);
489 break;
491 case FFS:
492 /* Don't use ffs here. Instead, get low order bit and then its
493 number. If arg0 is zero, this will return 0, as desired. */
494 arg0 &= GET_MODE_MASK (mode);
495 val = exact_log2 (arg0 & (- arg0)) + 1;
496 break;
498 case CLZ:
499 arg0 &= GET_MODE_MASK (mode);
500 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
501 ;
502 else
503 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
504 break;
506 case CTZ:
507 arg0 &= GET_MODE_MASK (mode);
508 if (arg0 == 0)
510 /* Even if the value at zero is undefined, we have to come
511 up with some replacement. Seems good enough. */
512 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
513 val = GET_MODE_BITSIZE (mode);
515 else
516 val = exact_log2 (arg0 & -arg0);
517 break;
519 case POPCOUNT:
520 arg0 &= GET_MODE_MASK (mode);
521 val = 0;
522 while (arg0)
523 val++, arg0 &= arg0 - 1;
524 break;
526 case PARITY:
527 arg0 &= GET_MODE_MASK (mode);
528 val = 0;
529 while (arg0)
530 val++, arg0 &= arg0 - 1;
531 val &= 1;
532 break;
534 case TRUNCATE:
535 val = arg0;
536 break;
538 case ZERO_EXTEND:
539 /* When zero-extending a CONST_INT, we need to know its
540 original mode. */
541 gcc_assert (op_mode != VOIDmode);
542 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
544 /* If we were really extending the mode,
545 we would have to distinguish between zero-extension
546 and sign-extension. */
547 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
548 val = arg0;
550 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
551 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
552 else
553 return 0;
554 break;
556 case SIGN_EXTEND:
557 if (op_mode == VOIDmode)
558 op_mode = mode;
559 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
561 /* If we were really extending the mode,
562 we would have to distinguish between zero-extension
563 and sign-extension. */
564 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
565 val = arg0;
567 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
569 val
570 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
571 if (val
572 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
573 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
575 else
576 return 0;
577 break;
579 case SQRT:
580 case FLOAT_EXTEND:
581 case FLOAT_TRUNCATE:
582 case SS_TRUNCATE:
583 case US_TRUNCATE:
584 return 0;
586 default:
587 gcc_unreachable ();
590 val = trunc_int_for_mode (val, mode);
592 return GEN_INT (val);
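/* Some concrete folds performed above: (neg:SI (const_int 5)) becomes
   (const_int -5), and (zero_extend:SI (const_int -1)) with QImode
   OP_MODE becomes (const_int 255).  */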
595 /* We can do some operations on integer CONST_DOUBLEs. Also allow
596 for a DImode operation on a CONST_INT. */
597 else if (GET_MODE (trueop) == VOIDmode
598 && width <= HOST_BITS_PER_WIDE_INT * 2
599 && (GET_CODE (trueop) == CONST_DOUBLE
600 || GET_CODE (trueop) == CONST_INT))
602 unsigned HOST_WIDE_INT l1, lv;
603 HOST_WIDE_INT h1, hv;
605 if (GET_CODE (trueop) == CONST_DOUBLE)
606 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
607 else
608 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
610 switch (code)
612 case NOT:
613 lv = ~ l1;
614 hv = ~ h1;
615 break;
617 case NEG:
618 neg_double (l1, h1, &lv, &hv);
619 break;
621 case ABS:
622 if (h1 < 0)
623 neg_double (l1, h1, &lv, &hv);
624 else
625 lv = l1, hv = h1;
626 break;
628 case FFS:
629 hv = 0;
630 if (l1 == 0)
632 if (h1 == 0)
633 lv = 0;
634 else
635 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
637 else
638 lv = exact_log2 (l1 & -l1) + 1;
639 break;
641 case CLZ:
642 hv = 0;
643 if (h1 != 0)
644 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
645 - HOST_BITS_PER_WIDE_INT;
646 else if (l1 != 0)
647 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
648 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
649 lv = GET_MODE_BITSIZE (mode);
650 break;
652 case CTZ:
653 hv = 0;
654 if (l1 != 0)
655 lv = exact_log2 (l1 & -l1);
656 else if (h1 != 0)
657 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
658 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
659 lv = GET_MODE_BITSIZE (mode);
660 break;
662 case POPCOUNT:
663 hv = 0;
664 lv = 0;
665 while (l1)
666 lv++, l1 &= l1 - 1;
667 while (h1)
668 lv++, h1 &= h1 - 1;
669 break;
671 case PARITY:
672 hv = 0;
673 lv = 0;
674 while (l1)
675 lv++, l1 &= l1 - 1;
676 while (h1)
677 lv++, h1 &= h1 - 1;
678 lv &= 1;
679 break;
681 case TRUNCATE:
682 /* This is just a change-of-mode, so do nothing. */
683 lv = l1, hv = h1;
684 break;
686 case ZERO_EXTEND:
687 gcc_assert (op_mode != VOIDmode);
689 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
690 return 0;
692 hv = 0;
693 lv = l1 & GET_MODE_MASK (op_mode);
694 break;
696 case SIGN_EXTEND:
697 if (op_mode == VOIDmode
698 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
699 return 0;
700 else
702 lv = l1 & GET_MODE_MASK (op_mode);
703 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
704 && (lv & ((HOST_WIDE_INT) 1
705 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
706 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
708 hv = HWI_SIGN_EXTEND (lv);
710 break;
712 case SQRT:
713 return 0;
715 default:
716 return 0;
719 return immed_double_const (lv, hv, mode);
722 else if (GET_CODE (trueop) == CONST_DOUBLE
723 && GET_MODE_CLASS (mode) == MODE_FLOAT)
725 REAL_VALUE_TYPE d, t;
726 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
728 switch (code)
730 case SQRT:
731 if (HONOR_SNANS (mode) && real_isnan (&d))
732 return 0;
733 real_sqrt (&t, mode, &d);
734 d = t;
735 break;
736 case ABS:
737 d = REAL_VALUE_ABS (d);
738 break;
739 case NEG:
740 d = REAL_VALUE_NEGATE (d);
741 break;
742 case FLOAT_TRUNCATE:
743 d = real_value_truncate (mode, d);
744 break;
745 case FLOAT_EXTEND:
746 /* All this does is change the mode. */
747 break;
748 case FIX:
749 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
750 break;
751 case NOT:
753 long tmp[4];
754 int i;
756 real_to_target (tmp, &d, GET_MODE (trueop));
757 for (i = 0; i < 4; i++)
758 tmp[i] = ~tmp[i];
759 real_from_target (&d, tmp, mode);
761 break;
762 default:
763 gcc_unreachable ();
765 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
768 else if (GET_CODE (trueop) == CONST_DOUBLE
769 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
770 && GET_MODE_CLASS (mode) == MODE_INT
771 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
773 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
774 operators are intentionally left unspecified (to ease implementation
775 by target backends), for consistency, this routine implements the
776 same semantics for constant folding as used by the middle-end. */
778 HOST_WIDE_INT xh, xl, th, tl;
779 REAL_VALUE_TYPE x, t;
780 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
781 switch (code)
783 case FIX:
784 if (REAL_VALUE_ISNAN (x))
785 return const0_rtx;
787 /* Test against the signed upper bound. */
788 if (width > HOST_BITS_PER_WIDE_INT)
790 th = ((unsigned HOST_WIDE_INT) 1
791 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
792 tl = -1;
794 else
796 th = 0;
797 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
799 real_from_integer (&t, VOIDmode, tl, th, 0);
800 if (REAL_VALUES_LESS (t, x))
802 xh = th;
803 xl = tl;
804 break;
807 /* Test against the signed lower bound. */
808 if (width > HOST_BITS_PER_WIDE_INT)
810 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
811 tl = 0;
813 else
815 th = -1;
816 tl = (HOST_WIDE_INT) -1 << (width - 1);
818 real_from_integer (&t, VOIDmode, tl, th, 0);
819 if (REAL_VALUES_LESS (x, t))
821 xh = th;
822 xl = tl;
823 break;
825 REAL_VALUE_TO_INT (&xl, &xh, x);
826 break;
828 case UNSIGNED_FIX:
829 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
830 return const0_rtx;
832 /* Test against the unsigned upper bound. */
833 if (width == 2*HOST_BITS_PER_WIDE_INT)
835 th = -1;
836 tl = -1;
838 else if (width >= HOST_BITS_PER_WIDE_INT)
840 th = ((unsigned HOST_WIDE_INT) 1
841 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
842 tl = -1;
844 else
846 th = 0;
847 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
849 real_from_integer (&t, VOIDmode, tl, th, 1);
850 if (REAL_VALUES_LESS (t, x))
852 xh = th;
853 xl = tl;
854 break;
857 REAL_VALUE_TO_INT (&xl, &xh, x);
858 break;
860 default:
861 gcc_unreachable ();
863 return immed_double_const (xl, xh, mode);
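/* For example, (fix:SI (const_double:DF 1.0e10)) saturates to the
   largest SImode value, (const_int 2147483647), and a NaN operand folds
   to (const_int 0), matching the middle-end's constant-folding
   semantics mentioned above.  */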
866 /* This was formerly used only for non-IEEE float.
867 eggert@twinsun.com says it is safe for IEEE also. */
868 else
870 enum rtx_code reversed;
871 rtx temp;
873 /* There are some simplifications we can do even if the operands
874 aren't constant. */
875 switch (code)
877 case NOT:
878 /* (not (not X)) == X. */
879 if (GET_CODE (op) == NOT)
880 return XEXP (op, 0);
882 /* (not (eq X Y)) == (ne X Y), etc. */
883 if (COMPARISON_P (op)
884 && (mode == BImode || STORE_FLAG_VALUE == -1)
885 && ((reversed = reversed_comparison_code (op, NULL_RTX))
886 != UNKNOWN))
887 return simplify_gen_relational (reversed, mode, VOIDmode,
888 XEXP (op, 0), XEXP (op, 1));
890 /* (not (plus X -1)) can become (neg X). */
891 if (GET_CODE (op) == PLUS
892 && XEXP (op, 1) == constm1_rtx)
893 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
895 /* Similarly, (not (neg X)) is (plus X -1). */
896 if (GET_CODE (op) == NEG)
897 return plus_constant (XEXP (op, 0), -1);
899 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
900 if (GET_CODE (op) == XOR
901 && GET_CODE (XEXP (op, 1)) == CONST_INT
902 && (temp = simplify_unary_operation (NOT, mode,
903 XEXP (op, 1),
904 mode)) != 0)
905 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
907 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
908 if (GET_CODE (op) == PLUS
909 && GET_CODE (XEXP (op, 1)) == CONST_INT
910 && mode_signbit_p (mode, XEXP (op, 1))
911 && (temp = simplify_unary_operation (NOT, mode,
912 XEXP (op, 1),
913 mode)) != 0)
914 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
918 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
919 operands other than 1, but that is not valid. We could do a
920 similar simplification for (not (lshiftrt C X)) where C is
921 just the sign bit, but this doesn't seem common enough to
922 bother with. */
923 if (GET_CODE (op) == ASHIFT
924 && XEXP (op, 0) == const1_rtx)
926 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
927 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
930 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
931 by reversing the comparison code if valid. */
932 if (STORE_FLAG_VALUE == -1
933 && COMPARISON_P (op)
934 && (reversed = reversed_comparison_code (op, NULL_RTX))
935 != UNKNOWN)
936 return simplify_gen_relational (reversed, mode, VOIDmode,
937 XEXP (op, 0), XEXP (op, 1));
939 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
940 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
941 so we can perform the above simplification. */
943 if (STORE_FLAG_VALUE == -1
944 && GET_CODE (op) == ASHIFTRT
945 && GET_CODE (XEXP (op, 1)) == CONST_INT
946 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
947 return simplify_gen_relational (GE, mode, VOIDmode,
948 XEXP (op, 0), const0_rtx);
950 break;
952 case NEG:
953 /* (neg (neg X)) == X. */
954 if (GET_CODE (op) == NEG)
955 return XEXP (op, 0);
957 /* (neg (plus X 1)) can become (not X). */
958 if (GET_CODE (op) == PLUS
959 && XEXP (op, 1) == const1_rtx)
960 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
962 /* Similarly, (neg (not X)) is (plus X 1). */
963 if (GET_CODE (op) == NOT)
964 return plus_constant (XEXP (op, 0), 1);
966 /* (neg (minus X Y)) can become (minus Y X). This transformation
967 isn't safe for modes with signed zeros, since if X and Y are
968 both +0, (minus Y X) is the same as (minus X Y). If the
969 rounding mode is towards +infinity (or -infinity) then the two
970 expressions will be rounded differently. */
971 if (GET_CODE (op) == MINUS
972 && !HONOR_SIGNED_ZEROS (mode)
973 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
974 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
975 XEXP (op, 0));
977 if (GET_CODE (op) == PLUS
978 && !HONOR_SIGNED_ZEROS (mode)
979 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
981 /* (neg (plus A C)) is simplified to (minus -C A). */
982 if (GET_CODE (XEXP (op, 1)) == CONST_INT
983 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
985 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
986 mode);
987 if (temp)
988 return simplify_gen_binary (MINUS, mode, temp,
989 XEXP (op, 0));
992 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
993 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
994 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
997 /* (neg (mult A B)) becomes (mult (neg A) B).
998 This works even for floating-point values. */
999 if (GET_CODE (op) == MULT
1000 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1002 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1003 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1006 /* NEG commutes with ASHIFT since it is multiplication. Only do
1007 this if we can then eliminate the NEG (e.g., if the operand
1008 is a constant). */
1009 if (GET_CODE (op) == ASHIFT)
1011 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1012 mode);
1013 if (temp)
1014 return simplify_gen_binary (ASHIFT, mode, temp,
1015 XEXP (op, 1));
1018 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1019 C is equal to the width of MODE minus 1. */
1020 if (GET_CODE (op) == ASHIFTRT
1021 && GET_CODE (XEXP (op, 1)) == CONST_INT
1022 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1023 return simplify_gen_binary (LSHIFTRT, mode,
1024 XEXP (op, 0), XEXP (op, 1));
1026 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1027 C is equal to the width of MODE minus 1. */
1028 if (GET_CODE (op) == LSHIFTRT
1029 && GET_CODE (XEXP (op, 1)) == CONST_INT
1030 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1031 return simplify_gen_binary (ASHIFTRT, mode,
1032 XEXP (op, 0), XEXP (op, 1));
1034 break;
1036 case SIGN_EXTEND:
1037 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1038 becomes just the MINUS if its mode is MODE. This allows
1039 folding switch statements on machines using casesi (such as
1040 the VAX). */
1041 if (GET_CODE (op) == TRUNCATE
1042 && GET_MODE (XEXP (op, 0)) == mode
1043 && GET_CODE (XEXP (op, 0)) == MINUS
1044 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1045 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1046 return XEXP (op, 0);
1048 /* Check for a sign extension of a subreg of a promoted
1049 variable, where the promotion is sign-extended, and the
1050 target mode is the same as the variable's promotion. */
1051 if (GET_CODE (op) == SUBREG
1052 && SUBREG_PROMOTED_VAR_P (op)
1053 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1054 && GET_MODE (XEXP (op, 0)) == mode)
1055 return XEXP (op, 0);
1057 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1058 if (! POINTERS_EXTEND_UNSIGNED
1059 && mode == Pmode && GET_MODE (op) == ptr_mode
1060 && (CONSTANT_P (op)
1061 || (GET_CODE (op) == SUBREG
1062 && REG_P (SUBREG_REG (op))
1063 && REG_POINTER (SUBREG_REG (op))
1064 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1065 return convert_memory_address (Pmode, op);
1066 #endif
1067 break;
1069 case ZERO_EXTEND:
1070 /* Check for a zero extension of a subreg of a promoted
1071 variable, where the promotion is zero-extended, and the
1072 target mode is the same as the variable's promotion. */
1073 if (GET_CODE (op) == SUBREG
1074 && SUBREG_PROMOTED_VAR_P (op)
1075 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
1076 && GET_MODE (XEXP (op, 0)) == mode)
1077 return XEXP (op, 0);
1079 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1080 if (POINTERS_EXTEND_UNSIGNED > 0
1081 && mode == Pmode && GET_MODE (op) == ptr_mode
1082 && (CONSTANT_P (op)
1083 || (GET_CODE (op) == SUBREG
1084 && REG_P (SUBREG_REG (op))
1085 && REG_POINTER (SUBREG_REG (op))
1086 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1087 return convert_memory_address (Pmode, op);
1088 #endif
1089 break;
1091 default:
1092 break;
1095 return 0;
1099 /* Subroutine of simplify_binary_operation to simplify a commutative,
1100 associative binary operation CODE with result mode MODE, operating
1101 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1102 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1103 canonicalization is possible. */
1105 static rtx
1106 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1107 rtx op0, rtx op1)
1109 rtx tem;
1111 /* Linearize the operator to the left. */
1112 if (GET_CODE (op1) == code)
1114 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1115 if (GET_CODE (op0) == code)
1117 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1118 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1121 /* "a op (b op c)" becomes "(b op c) op a". */
1122 if (! swap_commutative_operands_p (op1, op0))
1123 return simplify_gen_binary (code, mode, op1, op0);
1125 tem = op0;
1126 op0 = op1;
1127 op1 = tem;
1130 if (GET_CODE (op0) == code)
1132 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1133 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1135 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1136 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1139 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1140 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1141 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1142 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1143 if (tem != 0)
1144 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1146 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1147 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1148 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1149 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1150 if (tem != 0)
1151 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1154 return 0;
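/* For example, (plus:SI (plus:SI x (const_int 4)) (const_int 8))
   reassociates to (plus:SI x (const_int 12)), and a constant buried in
   the left operand is hoisted outward, so (plus (plus x (const_int 4)) y)
   becomes (plus (plus x y) (const_int 4)).  */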
1157 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1158 and OP1. Return 0 if no simplification is possible.
1160 Don't use this for relational operations such as EQ or LT.
1161 Use simplify_relational_operation instead. */
1162 rtx
1163 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1164 rtx op0, rtx op1)
1166 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1167 HOST_WIDE_INT val;
1168 unsigned int width = GET_MODE_BITSIZE (mode);
1169 rtx trueop0, trueop1;
1170 rtx tem;
1172 /* Relational operations don't work here. We must know the mode
1173 of the operands in order to do the comparison correctly.
1174 Assuming a full word can give incorrect results.
1175 Consider comparing 128 with -128 in QImode. */
1176 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1177 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1179 /* Make sure the constant is second. */
1180 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1181 && swap_commutative_operands_p (op0, op1))
1183 tem = op0, op0 = op1, op1 = tem;
1186 trueop0 = avoid_constant_pool_reference (op0);
1187 trueop1 = avoid_constant_pool_reference (op1);
1189 if (VECTOR_MODE_P (mode)
1190 && code != VEC_CONCAT
1191 && GET_CODE (trueop0) == CONST_VECTOR
1192 && GET_CODE (trueop1) == CONST_VECTOR)
1194 unsigned n_elts = GET_MODE_NUNITS (mode);
1195 enum machine_mode op0mode = GET_MODE (trueop0);
1196 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
1197 enum machine_mode op1mode = GET_MODE (trueop1);
1198 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
1199 rtvec v = rtvec_alloc (n_elts);
1200 unsigned int i;
1202 gcc_assert (op0_n_elts == n_elts);
1203 gcc_assert (op1_n_elts == n_elts);
1204 for (i = 0; i < n_elts; i++)
1206 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1207 CONST_VECTOR_ELT (trueop0, i),
1208 CONST_VECTOR_ELT (trueop1, i));
1209 if (!x)
1210 return 0;
1211 RTVEC_ELT (v, i) = x;
1214 return gen_rtx_CONST_VECTOR (mode, v);
1217 if (VECTOR_MODE_P (mode)
1218 && code == VEC_CONCAT
1219 && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
1221 unsigned n_elts = GET_MODE_NUNITS (mode);
1222 rtvec v = rtvec_alloc (n_elts);
1224 gcc_assert (n_elts >= 2);
1225 if (n_elts == 2)
1227 gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
1228 gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);
1230 RTVEC_ELT (v, 0) = trueop0;
1231 RTVEC_ELT (v, 1) = trueop1;
1233 else
1235 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
1236 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
1237 unsigned i;
1239 gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
1240 gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
1241 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
1243 for (i = 0; i < op0_n_elts; ++i)
1244 RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
1245 for (i = 0; i < op1_n_elts; ++i)
1246 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
1249 return gen_rtx_CONST_VECTOR (mode, v);
1252 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1253 && GET_CODE (trueop0) == CONST_DOUBLE
1254 && GET_CODE (trueop1) == CONST_DOUBLE
1255 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1257 if (code == AND
1258 || code == IOR
1259 || code == XOR)
1261 long tmp0[4];
1262 long tmp1[4];
1263 REAL_VALUE_TYPE r;
1264 int i;
1266 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1267 GET_MODE (op0));
1268 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1269 GET_MODE (op1));
1270 for (i = 0; i < 4; i++)
1272 switch (code)
1274 case AND:
1275 tmp0[i] &= tmp1[i];
1276 break;
1277 case IOR:
1278 tmp0[i] |= tmp1[i];
1279 break;
1280 case XOR:
1281 tmp0[i] ^= tmp1[i];
1282 break;
1283 default:
1284 gcc_unreachable ();
1287 real_from_target (&r, tmp0, mode);
1288 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1290 else
1292 REAL_VALUE_TYPE f0, f1, value, result;
1293 bool inexact;
1295 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1296 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1297 real_convert (&f0, mode, &f0);
1298 real_convert (&f1, mode, &f1);
1300 if (HONOR_SNANS (mode)
1301 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1302 return 0;
1304 if (code == DIV
1305 && REAL_VALUES_EQUAL (f1, dconst0)
1306 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1307 return 0;
1309 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1310 && flag_trapping_math
1311 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1313 int s0 = REAL_VALUE_NEGATIVE (f0);
1314 int s1 = REAL_VALUE_NEGATIVE (f1);
1316 switch (code)
1318 case PLUS:
1319 /* Inf + -Inf = NaN plus exception. */
1320 if (s0 != s1)
1321 return 0;
1322 break;
1323 case MINUS:
1324 /* Inf - Inf = NaN plus exception. */
1325 if (s0 == s1)
1326 return 0;
1327 break;
1328 case DIV:
1329 /* Inf / Inf = NaN plus exception. */
1330 return 0;
1331 default:
1332 break;
1336 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1337 && flag_trapping_math
1338 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1339 || (REAL_VALUE_ISINF (f1)
1340 && REAL_VALUES_EQUAL (f0, dconst0))))
1341 /* Inf * 0 = NaN plus exception. */
1342 return 0;
1344 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
1345 &f0, &f1);
1346 real_convert (&result, mode, &value);
1348 /* Don't constant fold this floating point operation if the
1349 result may depend upon the run-time rounding mode and
1350 flag_rounding_math is set, or if GCC's software emulation
1351 is unable to accurately represent the result. */
1353 if ((flag_rounding_math
1354 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
1355 && !flag_unsafe_math_optimizations))
1356 && (inexact || !real_identical (&result, &value)))
1357 return NULL_RTX;
1359 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
1363 /* We can fold some multi-word operations. */
1364 if (GET_MODE_CLASS (mode) == MODE_INT
1365 && width == HOST_BITS_PER_WIDE_INT * 2
1366 && (GET_CODE (trueop0) == CONST_DOUBLE
1367 || GET_CODE (trueop0) == CONST_INT)
1368 && (GET_CODE (trueop1) == CONST_DOUBLE
1369 || GET_CODE (trueop1) == CONST_INT))
1371 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1372 HOST_WIDE_INT h1, h2, hv, ht;
1374 if (GET_CODE (trueop0) == CONST_DOUBLE)
1375 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1376 else
1377 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1379 if (GET_CODE (trueop1) == CONST_DOUBLE)
1380 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1381 else
1382 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1384 switch (code)
1386 case MINUS:
1387 /* A - B == A + (-B). */
1388 neg_double (l2, h2, &lv, &hv);
1389 l2 = lv, h2 = hv;
1391 /* Fall through.... */
1393 case PLUS:
1394 add_double (l1, h1, l2, h2, &lv, &hv);
1395 break;
1397 case MULT:
1398 mul_double (l1, h1, l2, h2, &lv, &hv);
1399 break;
1401 case DIV:
1402 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1403 &lv, &hv, &lt, &ht))
1404 return 0;
1405 break;
1407 case MOD:
1408 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1409 &lt, &ht, &lv, &hv))
1410 return 0;
1411 break;
1413 case UDIV:
1414 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1415 &lv, &hv, &lt, &ht))
1416 return 0;
1417 break;
1419 case UMOD:
1420 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1421 &lt, &ht, &lv, &hv))
1422 return 0;
1423 break;
1425 case AND:
1426 lv = l1 & l2, hv = h1 & h2;
1427 break;
1429 case IOR:
1430 lv = l1 | l2, hv = h1 | h2;
1431 break;
1433 case XOR:
1434 lv = l1 ^ l2, hv = h1 ^ h2;
1435 break;
1437 case SMIN:
1438 if (h1 < h2
1439 || (h1 == h2
1440 && ((unsigned HOST_WIDE_INT) l1
1441 < (unsigned HOST_WIDE_INT) l2)))
1442 lv = l1, hv = h1;
1443 else
1444 lv = l2, hv = h2;
1445 break;
1447 case SMAX:
1448 if (h1 > h2
1449 || (h1 == h2
1450 && ((unsigned HOST_WIDE_INT) l1
1451 > (unsigned HOST_WIDE_INT) l2)))
1452 lv = l1, hv = h1;
1453 else
1454 lv = l2, hv = h2;
1455 break;
1457 case UMIN:
1458 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1459 || (h1 == h2
1460 && ((unsigned HOST_WIDE_INT) l1
1461 < (unsigned HOST_WIDE_INT) l2)))
1462 lv = l1, hv = h1;
1463 else
1464 lv = l2, hv = h2;
1465 break;
1467 case UMAX:
1468 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1469 || (h1 == h2
1470 && ((unsigned HOST_WIDE_INT) l1
1471 > (unsigned HOST_WIDE_INT) l2)))
1472 lv = l1, hv = h1;
1473 else
1474 lv = l2, hv = h2;
1475 break;
1477 case LSHIFTRT: case ASHIFTRT:
1478 case ASHIFT:
1479 case ROTATE: case ROTATERT:
1480 if (SHIFT_COUNT_TRUNCATED)
1481 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1483 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1484 return 0;
1486 if (code == LSHIFTRT || code == ASHIFTRT)
1487 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1488 code == ASHIFTRT);
1489 else if (code == ASHIFT)
1490 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1491 else if (code == ROTATE)
1492 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1493 else /* code == ROTATERT */
1494 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1495 break;
1497 default:
1498 return 0;
1501 return immed_double_const (lv, hv, mode);
1504 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1505 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1507 /* Even if we can't compute a constant result,
1508 there are some cases worth simplifying. */
1510 switch (code)
1512 case PLUS:
1513 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1514 when x is NaN, infinite, or finite and nonzero. They aren't
1515 when x is -0 and the rounding mode is not towards -infinity,
1516 since (-0) + 0 is then 0. */
1517 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1518 return op0;
1520 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1521 transformations are safe even for IEEE. */
1522 if (GET_CODE (op0) == NEG)
1523 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1524 else if (GET_CODE (op1) == NEG)
1525 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1527 /* (~a) + 1 -> -a */
1528 if (INTEGRAL_MODE_P (mode)
1529 && GET_CODE (op0) == NOT
1530 && trueop1 == const1_rtx)
1531 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1533 /* Handle both-operands-constant cases. We can only add
1534 CONST_INTs to constants since the sum of relocatable symbols
1535 can't be handled by most assemblers. Don't add CONST_INT
1536 to CONST_INT since overflow won't be computed properly if wider
1537 than HOST_BITS_PER_WIDE_INT. */
1539 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1540 && GET_CODE (op1) == CONST_INT)
1541 return plus_constant (op0, INTVAL (op1));
1542 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1543 && GET_CODE (op0) == CONST_INT)
1544 return plus_constant (op1, INTVAL (op0));
1546 /* See if this is something like X * C - X or vice versa or
1547 if the multiplication is written as a shift. If so, we can
1548 distribute and make a new multiply, shift, or maybe just
1549 have X (if C is 2 in the example above). But don't make
1550 something more expensive than we had before. */
1552 if (SCALAR_INT_MODE_P (mode))
1554 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1555 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1556 rtx lhs = op0, rhs = op1;
1558 if (GET_CODE (lhs) == NEG)
1560 coeff0l = -1;
1561 coeff0h = -1;
1562 lhs = XEXP (lhs, 0);
1564 else if (GET_CODE (lhs) == MULT
1565 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1567 coeff0l = INTVAL (XEXP (lhs, 1));
1568 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1569 lhs = XEXP (lhs, 0);
1571 else if (GET_CODE (lhs) == ASHIFT
1572 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1573 && INTVAL (XEXP (lhs, 1)) >= 0
1574 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1576 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1577 coeff0h = 0;
1578 lhs = XEXP (lhs, 0);
1581 if (GET_CODE (rhs) == NEG)
1583 coeff1l = -1;
1584 coeff1h = -1;
1585 rhs = XEXP (rhs, 0);
1587 else if (GET_CODE (rhs) == MULT
1588 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1590 coeff1l = INTVAL (XEXP (rhs, 1));
1591 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1592 rhs = XEXP (rhs, 0);
1594 else if (GET_CODE (rhs) == ASHIFT
1595 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1596 && INTVAL (XEXP (rhs, 1)) >= 0
1597 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1599 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1600 coeff1h = 0;
1601 rhs = XEXP (rhs, 0);
1604 if (rtx_equal_p (lhs, rhs))
1606 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1607 rtx coeff;
1608 unsigned HOST_WIDE_INT l;
1609 HOST_WIDE_INT h;
1611 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1612 coeff = immed_double_const (l, h, mode);
1614 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1615 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1616 ? tem : 0;
1620 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1621 if ((GET_CODE (op1) == CONST_INT
1622 || GET_CODE (op1) == CONST_DOUBLE)
1623 && GET_CODE (op0) == XOR
1624 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1625 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1626 && mode_signbit_p (mode, op1))
1627 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1628 simplify_gen_binary (XOR, mode, op1,
1629 XEXP (op0, 1)));
1631 /* If one of the operands is a PLUS or a MINUS, see if we can
1632 simplify this by the associative law.
1633 Don't use the associative law for floating point.
1634 The inaccuracy makes it nonassociative,
1635 and subtle programs can break if operations are associated. */
1637 if (INTEGRAL_MODE_P (mode)
1638 && (plus_minus_operand_p (op0)
1639 || plus_minus_operand_p (op1))
1640 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1641 return tem;
1643 /* Reassociate floating point addition only when the user
1644 specifies unsafe math optimizations. */
1645 if (FLOAT_MODE_P (mode)
1646 && flag_unsafe_math_optimizations)
1648 tem = simplify_associative_operation (code, mode, op0, op1);
1649 if (tem)
1650 return tem;
1652 break;
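/* Examples of the PLUS handling above: (plus x (neg y)) becomes
   (minus x y), ((not x) + 1) becomes (neg x) in integral modes, and
   (plus (mult x (const_int 3)) x) is redistributed to
   (mult x (const_int 4)) when that is no more costly.  */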
1654 case COMPARE:
1655 #ifdef HAVE_cc0
1656 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1657 using cc0, in which case we want to leave it as a COMPARE
1658 so we can distinguish it from a register-register-copy.
1660 In IEEE floating point, x-0 is not the same as x. */
1662 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1663 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1664 && trueop1 == CONST0_RTX (mode))
1665 return op0;
1666 #endif
1668 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1669 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1670 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1671 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1673 rtx xop00 = XEXP (op0, 0);
1674 rtx xop10 = XEXP (op1, 0);
1676 #ifdef HAVE_cc0
1677 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1678 #else
1679 if (REG_P (xop00) && REG_P (xop10)
1680 && GET_MODE (xop00) == GET_MODE (xop10)
1681 && REGNO (xop00) == REGNO (xop10)
1682 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1683 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1684 #endif
1685 return xop00;
1687 break;
1689 case MINUS:
1690 /* We can't assume x-x is 0 even with non-IEEE floating point,
1691 but since it is zero except in very strange circumstances, we
1692 will treat it as zero with -funsafe-math-optimizations. */
1693 if (rtx_equal_p (trueop0, trueop1)
1694 && ! side_effects_p (op0)
1695 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1696 return CONST0_RTX (mode);
1698 /* Change subtraction from zero into negation. (0 - x) is the
1699 same as -x when x is NaN, infinite, or finite and nonzero.
1700 But if the mode has signed zeros, and does not round towards
1701 -infinity, then 0 - 0 is 0, not -0. */
1702 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1703 return simplify_gen_unary (NEG, mode, op1, mode);
1705 /* (-1 - a) is ~a. */
1706 if (trueop0 == constm1_rtx)
1707 return simplify_gen_unary (NOT, mode, op1, mode);
1709 /* Subtracting 0 has no effect unless the mode has signed zeros
1710 and supports rounding towards -infinity. In such a case,
1711 0 - 0 is -0. */
1712 if (!(HONOR_SIGNED_ZEROS (mode)
1713 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1714 && trueop1 == CONST0_RTX (mode))
1715 return op0;
1717 /* See if this is something like X * C - X or vice versa or
1718 if the multiplication is written as a shift. If so, we can
1719 distribute and make a new multiply, shift, or maybe just
1720 have X (if C is 2 in the example above). But don't make
1721 something more expensive than we had before. */
1723 if (SCALAR_INT_MODE_P (mode))
1725 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1726 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1727 rtx lhs = op0, rhs = op1;
1729 if (GET_CODE (lhs) == NEG)
1731 coeff0l = -1;
1732 coeff0h = -1;
1733 lhs = XEXP (lhs, 0);
1735 else if (GET_CODE (lhs) == MULT
1736 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1738 coeff0l = INTVAL (XEXP (lhs, 1));
1739 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1740 lhs = XEXP (lhs, 0);
1742 else if (GET_CODE (lhs) == ASHIFT
1743 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1744 && INTVAL (XEXP (lhs, 1)) >= 0
1745 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1747 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1748 coeff0h = 0;
1749 lhs = XEXP (lhs, 0);
1752 if (GET_CODE (rhs) == NEG)
1754 negcoeff1l = 1;
1755 negcoeff1h = 0;
1756 rhs = XEXP (rhs, 0);
1758 else if (GET_CODE (rhs) == MULT
1759 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1761 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1762 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1763 rhs = XEXP (rhs, 0);
1765 else if (GET_CODE (rhs) == ASHIFT
1766 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1767 && INTVAL (XEXP (rhs, 1)) >= 0
1768 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1770 negcoeff1l = -(((HOST_WIDE_INT) 1)
1771 << INTVAL (XEXP (rhs, 1)));
1772 negcoeff1h = -1;
1773 rhs = XEXP (rhs, 0);
1776 if (rtx_equal_p (lhs, rhs))
1778 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1779 rtx coeff;
1780 unsigned HOST_WIDE_INT l;
1781 HOST_WIDE_INT h;
1783 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h,
1784 &l, &h);
1785 coeff = immed_double_const (l, h, mode);
1787 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1788 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1789 ? tem : 0;
1793 /* (a - (-b)) -> (a + b). True even for IEEE. */
1794 if (GET_CODE (op1) == NEG)
1795 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1797 /* (-x - c) may be simplified as (-c - x). */
1798 if (GET_CODE (op0) == NEG
1799 && (GET_CODE (op1) == CONST_INT
1800 || GET_CODE (op1) == CONST_DOUBLE))
1802 tem = simplify_unary_operation (NEG, mode, op1, mode);
1803 if (tem)
1804 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1807 /* If one of the operands is a PLUS or a MINUS, see if we can
1808 simplify this by the associative law.
1809 Don't use the associative law for floating point.
1810 The inaccuracy makes it nonassociative,
1811 and subtle programs can break if operations are associated. */
1813 if (INTEGRAL_MODE_P (mode)
1814 && (plus_minus_operand_p (op0)
1815 || plus_minus_operand_p (op1))
1816 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1817 return tem;
1819 /* Don't let a relocatable value get a negative coeff. */
1820 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1821 return simplify_gen_binary (PLUS, mode,
1822 op0,
1823 neg_const_int (mode, op1));
1825 /* (x - (x & y)) -> (x & ~y) */
1826 if (GET_CODE (op1) == AND)
1828 if (rtx_equal_p (op0, XEXP (op1, 0)))
1830 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1831 GET_MODE (XEXP (op1, 1)));
1832 return simplify_gen_binary (AND, mode, op0, tem);
1834 if (rtx_equal_p (op0, XEXP (op1, 1)))
1836 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1837 GET_MODE (XEXP (op1, 0)));
1838 return simplify_gen_binary (AND, mode, op0, tem);
1841 break;
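/* Examples of the MINUS handling above: (minus (const_int -1) x)
   becomes (not x), (minus x (const_int 5)) is rewritten as
   (plus x (const_int -5)) so a relocatable operand never gets a
   negative coefficient, and (minus x (and x y)) becomes
   (and x (not y)).  */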
1843 case MULT:
1844 if (trueop1 == constm1_rtx)
1845 return simplify_gen_unary (NEG, mode, op0, mode);
1847 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1848 x is NaN, since x * 0 is then also NaN. Nor is it valid
1849 when the mode has signed zeros, since multiplying a negative
1850 number by 0 will give -0, not 0. */
1851 if (!HONOR_NANS (mode)
1852 && !HONOR_SIGNED_ZEROS (mode)
1853 && trueop1 == CONST0_RTX (mode)
1854 && ! side_effects_p (op0))
1855 return op1;
1857 /* In IEEE floating point, x*1 is not equivalent to x for
1858 signalling NaNs. */
1859 if (!HONOR_SNANS (mode)
1860 && trueop1 == CONST1_RTX (mode))
1861 return op0;
1863 /* Convert multiply by constant power of two into shift unless
1864 we are still generating RTL. This test is a kludge. */
1865 if (GET_CODE (trueop1) == CONST_INT
1866 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1867 /* If the mode is larger than the host word size, and the
1868 uppermost bit is set, then this isn't a power of two due
1869 to implicit sign extension. */
1870 && (width <= HOST_BITS_PER_WIDE_INT
1871 || val != HOST_BITS_PER_WIDE_INT - 1))
1872 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1874 /* Likewise for multipliers wider than a word. */
1875 else if (GET_CODE (trueop1) == CONST_DOUBLE
1876 && (GET_MODE (trueop1) == VOIDmode
1877 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1878 && GET_MODE (op0) == mode
1879 && CONST_DOUBLE_LOW (trueop1) == 0
1880 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1881 return simplify_gen_binary (ASHIFT, mode, op0,
1882 GEN_INT (val
1883 + HOST_BITS_PER_WIDE_INT));
1885 /* x*2 is x+x and x*(-1) is -x */
1886 if (GET_CODE (trueop1) == CONST_DOUBLE
1887 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1888 && GET_MODE (op0) == mode)
1890 REAL_VALUE_TYPE d;
1891 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1893 if (REAL_VALUES_EQUAL (d, dconst2))
1894 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1896 if (REAL_VALUES_EQUAL (d, dconstm1))
1897 return simplify_gen_unary (NEG, mode, op0, mode);
1900 /* Reassociate multiplication, but for floating point MULTs
1901 only when the user specifies unsafe math optimizations. */
1902 if (! FLOAT_MODE_P (mode)
1903 || flag_unsafe_math_optimizations)
1905 tem = simplify_associative_operation (code, mode, op0, op1);
1906 if (tem)
1907 return tem;
1909 break;
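/* Examples of the MULT handling above: (mult x (const_int 8)) becomes
   (ashift x (const_int 3)); for floating point, (mult x 2.0) becomes
   (plus x x) and (mult x -1.0) becomes (neg x).  */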
1911 case IOR:
1912 if (trueop1 == const0_rtx)
1913 return op0;
1914 if (GET_CODE (trueop1) == CONST_INT
1915 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1916 == GET_MODE_MASK (mode)))
1917 return op1;
1918 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1919 return op0;
1920 /* A | (~A) -> -1 */
1921 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1922 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1923 && ! side_effects_p (op0)
1924 && SCALAR_INT_MODE_P (mode))
1925 return constm1_rtx;
1926 tem = simplify_associative_operation (code, mode, op0, op1);
1927 if (tem)
1928 return tem;
1929 break;
1931 case XOR:
1932 if (trueop1 == const0_rtx)
1933 return op0;
1934 if (GET_CODE (trueop1) == CONST_INT
1935 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1936 == GET_MODE_MASK (mode)))
1937 return simplify_gen_unary (NOT, mode, op0, mode);
1938 if (trueop0 == trueop1
1939 && ! side_effects_p (op0)
1940 && GET_MODE_CLASS (mode) != MODE_CC)
1941 return CONST0_RTX (mode);
1943 /* Canonicalize XOR of the most significant bit to PLUS. */
1944 if ((GET_CODE (op1) == CONST_INT
1945 || GET_CODE (op1) == CONST_DOUBLE)
1946 && mode_signbit_p (mode, op1))
1947 return simplify_gen_binary (PLUS, mode, op0, op1);
1948 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1949 if ((GET_CODE (op1) == CONST_INT
1950 || GET_CODE (op1) == CONST_DOUBLE)
1951 && GET_CODE (op0) == PLUS
1952 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1953 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1954 && mode_signbit_p (mode, XEXP (op0, 1)))
1955 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1956 simplify_gen_binary (XOR, mode, op1,
1957 XEXP (op0, 1)));
1959 tem = simplify_associative_operation (code, mode, op0, op1);
1960 if (tem)
1961 return tem;
1962 break;
1964 case AND:
1965 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
1966 return trueop1;
1967 /* If we are turning off bits already known off in OP0, we need
1968 not do an AND. */
1969 if (GET_CODE (trueop1) == CONST_INT
1970 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1971 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1972 return op0;
1973 if (trueop0 == trueop1 && ! side_effects_p (op0)
1974 && GET_MODE_CLASS (mode) != MODE_CC)
1975 return op0;
1976 /* A & (~A) -> 0 */
1977 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1978 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1979 && ! side_effects_p (op0)
1980 && GET_MODE_CLASS (mode) != MODE_CC)
1981 return CONST0_RTX (mode);
1983 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1984 there are no nonzero bits of C outside of X's mode. */
1985 if ((GET_CODE (op0) == SIGN_EXTEND
1986 || GET_CODE (op0) == ZERO_EXTEND)
1987 && GET_CODE (trueop1) == CONST_INT
1988 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1989 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1990 & INTVAL (trueop1)) == 0)
1992 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1993 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1994 gen_int_mode (INTVAL (trueop1),
1995 imode));
1996 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
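	 /* For example, (and:SI (sign_extend:SI (x:QI)) (const_int 0x7c))
	    becomes (zero_extend:SI (and:QI x (const_int 0x7c))): 0x7c has
	    no bits outside QImode's mask, so the copies of the sign bit
	    produced by the extension are masked away in either form.  */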
1999 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2000 ((A & N) + B) & M -> (A + B) & M
2001 Similarly if (N & M) == 0,
2002 ((A | N) + B) & M -> (A + B) & M
2003 and for - instead of + and/or ^ instead of |. */
2004 if (GET_CODE (trueop1) == CONST_INT
2005 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2006 && ~INTVAL (trueop1)
2007 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2008 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2010 rtx pmop[2];
2011 int which;
2013 pmop[0] = XEXP (op0, 0);
2014 pmop[1] = XEXP (op0, 1);
2016 for (which = 0; which < 2; which++)
2018 tem = pmop[which];
2019 switch (GET_CODE (tem))
2021 case AND:
2022 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2023 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2024 == INTVAL (trueop1))
2025 pmop[which] = XEXP (tem, 0);
2026 break;
2027 case IOR:
2028 case XOR:
2029 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2030 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2031 pmop[which] = XEXP (tem, 0);
2032 break;
2033 default:
2034 break;
2038 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2040 tem = simplify_gen_binary (GET_CODE (op0), mode,
2041 pmop[0], pmop[1]);
2042 return simplify_gen_binary (code, mode, tem, op1);
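	 /* Worked example: with M == 0xff and N == 0x1ff we have
	    (N & M) == M, so (((A & 0x1ff) + B) & 0xff) equals
	    ((A + B) & 0xff) for any A and B, because the low eight bits
	    of a sum depend only on the low eight bits of its addends.  */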
2045 tem = simplify_associative_operation (code, mode, op0, op1);
2046 if (tem)
2047 return tem;
2048 break;
2050 case UDIV:
2051 /* 0/x is 0 (or x&0 if x has side-effects). */
2052 if (trueop0 == CONST0_RTX (mode))
2054 if (side_effects_p (op1))
2055 return simplify_gen_binary (AND, mode, op1, trueop0);
2056 return trueop0;
2058 /* x/1 is x. */
2059 if (trueop1 == CONST1_RTX (mode))
2061 /* Handle narrowing UDIV. */
2062 rtx x = gen_lowpart_common (mode, op0);
2063 if (x)
2064 return x;
2065 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2066 return gen_lowpart_SUBREG (mode, op0);
2067 return op0;
2069 /* Convert divide by power of two into shift. */
2070 if (GET_CODE (trueop1) == CONST_INT
2071 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
2072 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
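	 /* E.g. (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).
	    exact_log2 must be strictly positive here, so division by 1 is
	    left to the x/1 rule above.  */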
2073 break;
2075 case DIV:
2076 /* Handle floating point and integers separately. */
2077 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2079 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2080 safe for modes with NaNs, since 0.0 / 0.0 will then be
2081 NaN rather than 0.0. Nor is it safe for modes with signed
2082 zeros, since dividing 0 by a negative number gives -0.0. */
2083 if (trueop0 == CONST0_RTX (mode)
2084 && !HONOR_NANS (mode)
2085 && !HONOR_SIGNED_ZEROS (mode)
2086 && ! side_effects_p (op1))
2087 return op0;
2088 /* x/1.0 is x. */
2089 if (trueop1 == CONST1_RTX (mode)
2090 && !HONOR_SNANS (mode))
2091 return op0;
2093 if (GET_CODE (trueop1) == CONST_DOUBLE
2094 && trueop1 != CONST0_RTX (mode))
2096 REAL_VALUE_TYPE d;
2097 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2099 /* x/-1.0 is -x. */
2100 if (REAL_VALUES_EQUAL (d, dconstm1)
2101 && !HONOR_SNANS (mode))
2102 return simplify_gen_unary (NEG, mode, op0, mode);
2104 /* Change FP division by a constant into multiplication.
2105 Only do this with -funsafe-math-optimizations. */
2106 if (flag_unsafe_math_optimizations
2107 && !REAL_VALUES_EQUAL (d, dconst0))
2109 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2110 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2111 return simplify_gen_binary (MULT, mode, op0, tem);
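	 /* E.g. x/4.0 becomes x*0.25, which is exact, while x/3.0 becomes
	    x*(1/3.0), which can differ in the last bit because 1/3 is not
	    exactly representable; hence the flag_unsafe_math_optimizations
	    guard.  */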
2115 else
2117 /* 0/x is 0 (or x&0 if x has side-effects). */
2118 if (trueop0 == CONST0_RTX (mode))
2120 if (side_effects_p (op1))
2121 return simplify_gen_binary (AND, mode, op1, trueop0);
2122 return trueop0;
2124 /* x/1 is x. */
2125 if (trueop1 == CONST1_RTX (mode))
2127 /* Handle narrowing DIV. */
2128 rtx x = gen_lowpart_common (mode, op0);
2129 if (x)
2130 return x;
2131 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2132 return gen_lowpart_SUBREG (mode, op0);
2133 return op0;
2135 /* x/-1 is -x. */
2136 if (trueop1 == constm1_rtx)
2138 rtx x = gen_lowpart_common (mode, op0);
2139 if (!x)
2140 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2141 ? gen_lowpart_SUBREG (mode, op0) : op0;
2142 return simplify_gen_unary (NEG, mode, x, mode);
2145 break;
2147 case UMOD:
2148 /* 0%x is 0 (or x&0 if x has side-effects). */
2149 if (trueop0 == CONST0_RTX (mode))
2151 if (side_effects_p (op1))
2152 return simplify_gen_binary (AND, mode, op1, trueop0);
2153 return trueop0;
2155 /* x%1 is 0 (or x&0 if x has side-effects). */
2156 if (trueop1 == CONST1_RTX (mode))
2158 if (side_effects_p (op0))
2159 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2160 return CONST0_RTX (mode);
2162 /* Implement modulus by power of two as AND. */
2163 if (GET_CODE (trueop1) == CONST_INT
2164 && exact_log2 (INTVAL (trueop1)) > 0)
2165 return simplify_gen_binary (AND, mode, op0,
2166 GEN_INT (INTVAL (op1) - 1));
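	 /* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)).  */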
2167 break;
2169 case MOD:
2170 /* 0%x is 0 (or x&0 if x has side-effects). */
2171 if (trueop0 == CONST0_RTX (mode))
2173 if (side_effects_p (op1))
2174 return simplify_gen_binary (AND, mode, op1, trueop0);
2175 return trueop0;
2177 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2178 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2180 if (side_effects_p (op0))
2181 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2182 return CONST0_RTX (mode);
2184 break;
2186 case ROTATERT:
2187 case ROTATE:
2188 case ASHIFTRT:
2189 /* Rotating ~0 always results in ~0. */
2190 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2191 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2192 && ! side_effects_p (op1))
2193 return op0;
2195 /* Fall through.... */
2197 case ASHIFT:
2198 case LSHIFTRT:
2199 if (trueop1 == CONST0_RTX (mode))
2200 return op0;
2201 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2202 return op0;
2203 break;
2205 case SMIN:
2206 if (width <= HOST_BITS_PER_WIDE_INT
2207 && GET_CODE (trueop1) == CONST_INT
2208 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2209 && ! side_effects_p (op0))
2210 return op1;
2211 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2212 return op0;
2213 tem = simplify_associative_operation (code, mode, op0, op1);
2214 if (tem)
2215 return tem;
2216 break;
2218 case SMAX:
2219 if (width <= HOST_BITS_PER_WIDE_INT
2220 && GET_CODE (trueop1) == CONST_INT
2221 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2222 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2223 && ! side_effects_p (op0))
2224 return op1;
2225 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2226 return op0;
2227 tem = simplify_associative_operation (code, mode, op0, op1);
2228 if (tem)
2229 return tem;
2230 break;
2232 case UMIN:
2233 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2234 return op1;
2235 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2236 return op0;
2237 tem = simplify_associative_operation (code, mode, op0, op1);
2238 if (tem)
2239 return tem;
2240 break;
2242 case UMAX:
2243 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2244 return op1;
2245 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2246 return op0;
2247 tem = simplify_associative_operation (code, mode, op0, op1);
2248 if (tem)
2249 return tem;
2250 break;
2252 case SS_PLUS:
2253 case US_PLUS:
2254 case SS_MINUS:
2255 case US_MINUS:
2256 /* ??? There are simplifications that can be done. */
2257 return 0;
2259 case VEC_SELECT:
2260 if (!VECTOR_MODE_P (mode))
2262 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2263 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2264 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2265 gcc_assert (XVECLEN (trueop1, 0) == 1);
2266 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2268 if (GET_CODE (trueop0) == CONST_VECTOR)
2269 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2270 (trueop1, 0, 0)));
2272 else
2274 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2275 gcc_assert (GET_MODE_INNER (mode)
2276 == GET_MODE_INNER (GET_MODE (trueop0)));
2277 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2279 if (GET_CODE (trueop0) == CONST_VECTOR)
2281 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2282 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2283 rtvec v = rtvec_alloc (n_elts);
2284 unsigned int i;
2286 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2287 for (i = 0; i < n_elts; i++)
2289 rtx x = XVECEXP (trueop1, 0, i);
2291 gcc_assert (GET_CODE (x) == CONST_INT);
2292 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2293 INTVAL (x));
2296 return gen_rtx_CONST_VECTOR (mode, v);
2299 return 0;
2300 case VEC_CONCAT:
2302 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2303 ? GET_MODE (trueop0)
2304 : GET_MODE_INNER (mode));
2305 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2306 ? GET_MODE (trueop1)
2307 : GET_MODE_INNER (mode));
2309 gcc_assert (VECTOR_MODE_P (mode));
2310 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2311 == GET_MODE_SIZE (mode));
2313 if (VECTOR_MODE_P (op0_mode))
2314 gcc_assert (GET_MODE_INNER (mode)
2315 == GET_MODE_INNER (op0_mode));
2316 else
2317 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2319 if (VECTOR_MODE_P (op1_mode))
2320 gcc_assert (GET_MODE_INNER (mode)
2321 == GET_MODE_INNER (op1_mode));
2322 else
2323 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2325 if ((GET_CODE (trueop0) == CONST_VECTOR
2326 || GET_CODE (trueop0) == CONST_INT
2327 || GET_CODE (trueop0) == CONST_DOUBLE)
2328 && (GET_CODE (trueop1) == CONST_VECTOR
2329 || GET_CODE (trueop1) == CONST_INT
2330 || GET_CODE (trueop1) == CONST_DOUBLE))
2332 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2333 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2334 rtvec v = rtvec_alloc (n_elts);
2335 unsigned int i;
2336 unsigned in_n_elts = 1;
2338 if (VECTOR_MODE_P (op0_mode))
2339 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2340 for (i = 0; i < n_elts; i++)
2342 if (i < in_n_elts)
2344 if (!VECTOR_MODE_P (op0_mode))
2345 RTVEC_ELT (v, i) = trueop0;
2346 else
2347 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2349 else
2351 if (!VECTOR_MODE_P (op1_mode))
2352 RTVEC_ELT (v, i) = trueop1;
2353 else
2354 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2355 i - in_n_elts);
2359 return gen_rtx_CONST_VECTOR (mode, v);
2362 return 0;
2364 default:
2365 gcc_unreachable ();
2368 return 0;
2371 /* Get the integer argument values in two forms:
2372 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2374 arg0 = INTVAL (trueop0);
2375 arg1 = INTVAL (trueop1);
2377 if (width < HOST_BITS_PER_WIDE_INT)
2379 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2380 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2382 arg0s = arg0;
2383 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2384 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2386 arg1s = arg1;
2387 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2388 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2390 else
2392 arg0s = arg0;
2393 arg1s = arg1;
2396 /* Compute the value of the arithmetic. */
2398 switch (code)
2400 case PLUS:
2401 val = arg0s + arg1s;
2402 break;
2404 case MINUS:
2405 val = arg0s - arg1s;
2406 break;
2408 case MULT:
2409 val = arg0s * arg1s;
2410 break;
2412 case DIV:
2413 if (arg1s == 0
2414 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2415 && arg1s == -1))
2416 return 0;
2417 val = arg0s / arg1s;
2418 break;
2420 case MOD:
2421 if (arg1s == 0
2422 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2423 && arg1s == -1))
2424 return 0;
2425 val = arg0s % arg1s;
2426 break;
2428 case UDIV:
2429 if (arg1 == 0
2430 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2431 && arg1s == -1))
2432 return 0;
2433 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2434 break;
2436 case UMOD:
2437 if (arg1 == 0
2438 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2439 && arg1s == -1))
2440 return 0;
2441 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2442 break;
2444 case AND:
2445 val = arg0 & arg1;
2446 break;
2448 case IOR:
2449 val = arg0 | arg1;
2450 break;
2452 case XOR:
2453 val = arg0 ^ arg1;
2454 break;
2456 case LSHIFTRT:
2457 case ASHIFT:
2458 case ASHIFTRT:
2459 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2460 value is in range. We can't return any old value for out-of-range
2461 arguments because either the middle-end (via shift_truncation_mask)
2462 or the back-end might be relying on target-specific knowledge.
2463 Nor can we rely on shift_truncation_mask, since the shift might
2464 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2465 if (SHIFT_COUNT_TRUNCATED)
2466 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2467 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2468 return 0;
2470 val = (code == ASHIFT
2471 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2472 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2474 /* Sign-extend the result for arithmetic right shifts. */
2475 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2476 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
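	 /* Example in QImode (width 8): arg0 == 0xfc, i.e. arg0s == -4,
	    shifted arithmetically right by 1: the logical shift gives 0x7e,
	    the high bits are then refilled with ones, and the
	    trunc_int_for_mode call below yields -2.  */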
2477 break;
2479 case ROTATERT:
2480 if (arg1 < 0)
2481 return 0;
2483 arg1 %= width;
2484 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2485 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2486 break;
2488 case ROTATE:
2489 if (arg1 < 0)
2490 return 0;
2492 arg1 %= width;
2493 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2494 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
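	 /* E.g. rotating arg0 == 0x12345678 left by 8 in a 32-bit mode
	    gives 0x34567812: the top byte wraps around to the bottom.  */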
2495 break;
2497 case COMPARE:
2498 /* Do nothing here. */
2499 return 0;
2501 case SMIN:
2502 val = arg0s <= arg1s ? arg0s : arg1s;
2503 break;
2505 case UMIN:
2506 val = ((unsigned HOST_WIDE_INT) arg0
2507 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2508 break;
2510 case SMAX:
2511 val = arg0s > arg1s ? arg0s : arg1s;
2512 break;
2514 case UMAX:
2515 val = ((unsigned HOST_WIDE_INT) arg0
2516 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2517 break;
2519 case SS_PLUS:
2520 case US_PLUS:
2521 case SS_MINUS:
2522 case US_MINUS:
2523 /* ??? There are simplifications that can be done. */
2524 return 0;
2526 default:
2527 gcc_unreachable ();
2530 val = trunc_int_for_mode (val, mode);
2532 return GEN_INT (val);
2535 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2536 PLUS or MINUS.
2538 Rather than testing for specific cases, we do this by a brute-force method
2539 and do all possible simplifications until no more changes occur. Then
2540 we rebuild the operation.
2542 If FORCE is true, then always generate the rtx. This is used to
2543 canonicalize stuff emitted from simplify_gen_binary. Note that this
2544 can still fail if the rtx is too complex. It won't fail just because
2545 the result is not 'simpler' than the input, however. */
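/* Sketch of the approach: (minus (plus a b) (plus b c)) is first flattened
   into the operand list {+a, +b, -b, -c}; the pairwise simplification pass
   cancels +b against -b, and the remaining operands are rebuilt, giving in
   effect a - c.  */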
2547 struct simplify_plus_minus_op_data
2549 rtx op;
2550 int neg;
2553 static int
2554 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2556 const struct simplify_plus_minus_op_data *d1 = p1;
2557 const struct simplify_plus_minus_op_data *d2 = p2;
2559 return (commutative_operand_precedence (d2->op)
2560 - commutative_operand_precedence (d1->op));
2563 static rtx
2564 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2565 rtx op1, int force)
2567 struct simplify_plus_minus_op_data ops[8];
2568 rtx result, tem;
2569 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2570 int first, changed;
2571 int i, j;
2573 memset (ops, 0, sizeof ops);
2575 /* Set up the two operands and then expand them until nothing has been
2576 changed. If we run out of room in our array, give up; this should
2577 almost never happen. */
2579 ops[0].op = op0;
2580 ops[0].neg = 0;
2581 ops[1].op = op1;
2582 ops[1].neg = (code == MINUS);
2586 changed = 0;
2588 for (i = 0; i < n_ops; i++)
2590 rtx this_op = ops[i].op;
2591 int this_neg = ops[i].neg;
2592 enum rtx_code this_code = GET_CODE (this_op);
2594 switch (this_code)
2596 case PLUS:
2597 case MINUS:
2598 if (n_ops == 7)
2599 return NULL_RTX;
2601 ops[n_ops].op = XEXP (this_op, 1);
2602 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2603 n_ops++;
2605 ops[i].op = XEXP (this_op, 0);
2606 input_ops++;
2607 changed = 1;
2608 break;
2610 case NEG:
2611 ops[i].op = XEXP (this_op, 0);
2612 ops[i].neg = ! this_neg;
2613 changed = 1;
2614 break;
2616 case CONST:
2617 if (n_ops < 7
2618 && GET_CODE (XEXP (this_op, 0)) == PLUS
2619 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2620 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2622 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2623 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2624 ops[n_ops].neg = this_neg;
2625 n_ops++;
2626 input_consts++;
2627 changed = 1;
2629 break;
2631 case NOT:
2632 /* ~a -> (-a - 1) */
2633 if (n_ops != 7)
2635 ops[n_ops].op = constm1_rtx;
2636 ops[n_ops++].neg = this_neg;
2637 ops[i].op = XEXP (this_op, 0);
2638 ops[i].neg = !this_neg;
2639 changed = 1;
2641 break;
2643 case CONST_INT:
2644 if (this_neg)
2646 ops[i].op = neg_const_int (mode, this_op);
2647 ops[i].neg = 0;
2648 changed = 1;
2650 break;
2652 default:
2653 break;
2657 while (changed);
2659 /* If we only have two operands, we can't do anything. */
2660 if (n_ops <= 2 && !force)
2661 return NULL_RTX;
2663 /* Count the number of CONSTs we didn't split above. */
2664 for (i = 0; i < n_ops; i++)
2665 if (GET_CODE (ops[i].op) == CONST)
2666 input_consts++;
2668 /* Now simplify each pair of operands until nothing changes. The first
2669 time through just simplify constants against each other. */
2671 first = 1;
2674 changed = first;
2676 for (i = 0; i < n_ops - 1; i++)
2677 for (j = i + 1; j < n_ops; j++)
2679 rtx lhs = ops[i].op, rhs = ops[j].op;
2680 int lneg = ops[i].neg, rneg = ops[j].neg;
2682 if (lhs != 0 && rhs != 0
2683 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2685 enum rtx_code ncode = PLUS;
2687 if (lneg != rneg)
2689 ncode = MINUS;
2690 if (lneg)
2691 tem = lhs, lhs = rhs, rhs = tem;
2693 else if (swap_commutative_operands_p (lhs, rhs))
2694 tem = lhs, lhs = rhs, rhs = tem;
2696 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2698 /* Reject "simplifications" that just wrap the two
2699 arguments in a CONST. Failure to do so can result
2700 in infinite recursion with simplify_binary_operation
2701 when it calls us to simplify CONST operations. */
2702 if (tem
2703 && ! (GET_CODE (tem) == CONST
2704 && GET_CODE (XEXP (tem, 0)) == ncode
2705 && XEXP (XEXP (tem, 0), 0) == lhs
2706 && XEXP (XEXP (tem, 0), 1) == rhs)
2707 /* Don't allow -x + -1 -> ~x simplifications in the
2708 first pass. This allows us the chance to combine
2709 the -1 with other constants. */
2710 && ! (first
2711 && GET_CODE (tem) == NOT
2712 && XEXP (tem, 0) == rhs))
2714 lneg &= rneg;
2715 if (GET_CODE (tem) == NEG)
2716 tem = XEXP (tem, 0), lneg = !lneg;
2717 if (GET_CODE (tem) == CONST_INT && lneg)
2718 tem = neg_const_int (mode, tem), lneg = 0;
2720 ops[i].op = tem;
2721 ops[i].neg = lneg;
2722 ops[j].op = NULL_RTX;
2723 changed = 1;
2728 first = 0;
2730 while (changed);
2732 /* Pack all the operands to the lower-numbered entries. */
2733 for (i = 0, j = 0; j < n_ops; j++)
2734 if (ops[j].op)
2735 ops[i++] = ops[j];
2736 n_ops = i;
2738 /* Sort the operations based on swap_commutative_operands_p. */
2739 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2741 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2742 if (n_ops == 2
2743 && GET_CODE (ops[1].op) == CONST_INT
2744 && CONSTANT_P (ops[0].op)
2745 && ops[0].neg)
2746 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2748 /* We suppressed creation of trivial CONST expressions in the
2749 combination loop to avoid recursion. Create one manually now.
2750 The combination loop should have ensured that there is exactly
2751 one CONST_INT, and the sort will have ensured that it is last
2752 in the array and that any other constant will be next-to-last. */
2754 if (n_ops > 1
2755 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2756 && CONSTANT_P (ops[n_ops - 2].op))
2758 rtx value = ops[n_ops - 1].op;
2759 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2760 value = neg_const_int (mode, value);
2761 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2762 n_ops--;
2765 /* Count the number of CONSTs that we generated. */
2766 n_consts = 0;
2767 for (i = 0; i < n_ops; i++)
2768 if (GET_CODE (ops[i].op) == CONST)
2769 n_consts++;
2771 /* Give up if we didn't reduce the number of operands we had. Make
2772 sure we count a CONST as two operands. If we have the same
2773 number of operands, but have made more CONSTs than before, this
2774 is also an improvement, so accept it. */
2775 if (!force
2776 && (n_ops + n_consts > input_ops
2777 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2778 return NULL_RTX;
2780 /* Put a non-negated operand first, if possible. */
2782 for (i = 0; i < n_ops && ops[i].neg; i++)
2783 continue;
2784 if (i == n_ops)
2785 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2786 else if (i != 0)
2788 tem = ops[0].op;
2789 ops[0] = ops[i];
2790 ops[i].op = tem;
2791 ops[i].neg = 1;
2794 /* Now make the result by performing the requested operations. */
2795 result = ops[0].op;
2796 for (i = 1; i < n_ops; i++)
2797 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2798 mode, result, ops[i].op);
2800 return result;
2803 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2804 static bool
2805 plus_minus_operand_p (rtx x)
2807 return GET_CODE (x) == PLUS
2808 || GET_CODE (x) == MINUS
2809 || (GET_CODE (x) == CONST
2810 && GET_CODE (XEXP (x, 0)) == PLUS
2811 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2812 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2815 /* Like simplify_binary_operation except used for relational operators.
2816 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2817 not also be VOIDmode.
2819 CMP_MODE specifies in which mode the comparison is done, so it is
2820 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2821 the operands or, if both are VOIDmode, the operands are compared in
2822 "infinite precision". */
2824 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2825 enum machine_mode cmp_mode, rtx op0, rtx op1)
2827 rtx tem, trueop0, trueop1;
2829 if (cmp_mode == VOIDmode)
2830 cmp_mode = GET_MODE (op0);
2831 if (cmp_mode == VOIDmode)
2832 cmp_mode = GET_MODE (op1);
2834 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2835 if (tem)
2837 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2839 if (tem == const0_rtx)
2840 return CONST0_RTX (mode);
2841 #ifdef FLOAT_STORE_FLAG_VALUE
2843 REAL_VALUE_TYPE val;
2844 val = FLOAT_STORE_FLAG_VALUE (mode);
2845 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2847 #else
2848 return NULL_RTX;
2849 #endif
2851 if (VECTOR_MODE_P (mode))
2853 if (tem == const0_rtx)
2854 return CONST0_RTX (mode);
2855 #ifdef VECTOR_STORE_FLAG_VALUE
2857 int i, units;
2858 rtvec v;
2860 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2861 if (val == NULL_RTX)
2862 return NULL_RTX;
2863 if (val == const1_rtx)
2864 return CONST1_RTX (mode);
2866 units = GET_MODE_NUNITS (mode);
2867 v = rtvec_alloc (units);
2868 for (i = 0; i < units; i++)
2869 RTVEC_ELT (v, i) = val;
2870 return gen_rtx_raw_CONST_VECTOR (mode, v);
2872 #else
2873 return NULL_RTX;
2874 #endif
2877 return tem;
2880 /* For the following tests, ensure const0_rtx is op1. */
2881 if (swap_commutative_operands_p (op0, op1)
2882 || (op0 == const0_rtx && op1 != const0_rtx))
2883 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2885 /* If op0 is a compare, extract the comparison arguments from it. */
2886 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2887 return simplify_relational_operation (code, mode, VOIDmode,
2888 XEXP (op0, 0), XEXP (op0, 1));
2890 if (mode == VOIDmode
2891 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2892 || CC0_P (op0))
2893 return NULL_RTX;
2895 trueop0 = avoid_constant_pool_reference (op0);
2896 trueop1 = avoid_constant_pool_reference (op1);
2897 return simplify_relational_operation_1 (code, mode, cmp_mode,
2898 trueop0, trueop1);
2901 /* This part of simplify_relational_operation is only used when CMP_MODE
2902 is not in class MODE_CC (i.e. it is a real comparison).
2904 MODE is the mode of the result, while CMP_MODE specifies in which
2905 mode the comparison is done, so it is the mode of the operands. */
2907 static rtx
2908 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2909 enum machine_mode cmp_mode, rtx op0, rtx op1)
2911 enum rtx_code op0code = GET_CODE (op0);
2913 if (GET_CODE (op1) == CONST_INT)
2915 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2917 /* If op0 is a comparison, extract the comparison arguments from it. */
2918 if (code == NE)
2920 if (GET_MODE (op0) == mode)
2921 return simplify_rtx (op0);
2922 else
2923 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2924 XEXP (op0, 0), XEXP (op0, 1));
2926 else if (code == EQ)
2928 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2929 if (new_code != UNKNOWN)
2930 return simplify_gen_relational (new_code, mode, VOIDmode,
2931 XEXP (op0, 0), XEXP (op0, 1));
2936 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2937 if ((code == EQ || code == NE)
2938 && (op0code == PLUS || op0code == MINUS)
2939 && CONSTANT_P (op1)
2940 && CONSTANT_P (XEXP (op0, 1))
2941 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2943 rtx x = XEXP (op0, 0);
2944 rtx c = XEXP (op0, 1);
2946 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2947 cmp_mode, op1, c);
2948 return simplify_gen_relational (code, mode, cmp_mode, x, c);
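	 /* E.g. (eq (plus x (const_int 3)) (const_int 7)) becomes
	    (eq x (const_int 4)).  */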
2951 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
2952 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2953 if (code == NE
2954 && op1 == const0_rtx
2955 && GET_MODE_CLASS (mode) == MODE_INT
2956 && cmp_mode != VOIDmode
2957 /* ??? Work-around BImode bugs in the ia64 backend. */
2958 && mode != BImode
2959 && cmp_mode != BImode
2960 && nonzero_bits (op0, cmp_mode) == 1
2961 && STORE_FLAG_VALUE == 1)
2962 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2963 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2964 : lowpart_subreg (mode, op0, cmp_mode);
2966 return NULL_RTX;
2969 /* Check if the given comparison (done in the given MODE) is actually a
2970 tautology or a contradiction.
2971 If no simplification is possible, this function returns zero.
2972 Otherwise, it returns either const_true_rtx or const0_rtx. */
2975 simplify_const_relational_operation (enum rtx_code code,
2976 enum machine_mode mode,
2977 rtx op0, rtx op1)
2979 int equal, op0lt, op0ltu, op1lt, op1ltu;
2980 rtx tem;
2981 rtx trueop0;
2982 rtx trueop1;
2984 gcc_assert (mode != VOIDmode
2985 || (GET_MODE (op0) == VOIDmode
2986 && GET_MODE (op1) == VOIDmode));
2988 /* If op0 is a compare, extract the comparison arguments from it. */
2989 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2990 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2992 /* We can't simplify MODE_CC values since we don't know what the
2993 actual comparison is. */
2994 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2995 return 0;
2997 /* Make sure the constant is second. */
2998 if (swap_commutative_operands_p (op0, op1))
3000 tem = op0, op0 = op1, op1 = tem;
3001 code = swap_condition (code);
3004 trueop0 = avoid_constant_pool_reference (op0);
3005 trueop1 = avoid_constant_pool_reference (op1);
3007 /* For integer comparisons of A and B maybe we can simplify A - B and can
3008 then simplify a comparison of that with zero. If A and B are both either
3009 a register or a CONST_INT, this can't help; testing for these cases will
3010 prevent infinite recursion here and speed things up.
3012 If CODE is an unsigned comparison, then we can never do this optimization,
3013 because it gives an incorrect result if the subtraction wraps around zero.
3014 ANSI C defines unsigned operations such that they never overflow, and
3015 thus such cases can not be ignored; but we cannot do it even for
3016 signed comparisons for languages such as Java, so test flag_wrapv. */
3018 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3019 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3020 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3021 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3022 /* We cannot do this for == or != if tem is a nonzero address. */
3023 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3024 && code != GTU && code != GEU && code != LTU && code != LEU)
3025 return simplify_const_relational_operation (signed_condition (code),
3026 mode, tem, const0_rtx);
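	 /* For instance, comparing (plus x (const_int 1)) against x with a
	    signed GT: the difference simplifies to (const_int 1), and the
	    recursive call then folds (gt 1 0) to const_true_rtx.  The
	    unsigned codes are excluded because x + 1 wraps below x when x
	    is the maximum value.  */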
3028 if (flag_unsafe_math_optimizations && code == ORDERED)
3029 return const_true_rtx;
3031 if (flag_unsafe_math_optimizations && code == UNORDERED)
3032 return const0_rtx;
3034 /* For modes without NaNs, if the two operands are equal, we know the
3035 result except if they have side-effects. */
3036 if (! HONOR_NANS (GET_MODE (trueop0))
3037 && rtx_equal_p (trueop0, trueop1)
3038 && ! side_effects_p (trueop0))
3039 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3041 /* If the operands are floating-point constants, see if we can fold
3042 the result. */
3043 else if (GET_CODE (trueop0) == CONST_DOUBLE
3044 && GET_CODE (trueop1) == CONST_DOUBLE
3045 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
3047 REAL_VALUE_TYPE d0, d1;
3049 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3050 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3052 /* Comparisons are unordered iff at least one of the values is NaN. */
3053 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3054 switch (code)
3056 case UNEQ:
3057 case UNLT:
3058 case UNGT:
3059 case UNLE:
3060 case UNGE:
3061 case NE:
3062 case UNORDERED:
3063 return const_true_rtx;
3064 case EQ:
3065 case LT:
3066 case GT:
3067 case LE:
3068 case GE:
3069 case LTGT:
3070 case ORDERED:
3071 return const0_rtx;
3072 default:
3073 return 0;
3076 equal = REAL_VALUES_EQUAL (d0, d1);
3077 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3078 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3081 /* Otherwise, see if the operands are both integers. */
3082 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3083 && (GET_CODE (trueop0) == CONST_DOUBLE
3084 || GET_CODE (trueop0) == CONST_INT)
3085 && (GET_CODE (trueop1) == CONST_DOUBLE
3086 || GET_CODE (trueop1) == CONST_INT))
3088 int width = GET_MODE_BITSIZE (mode);
3089 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3090 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3092 /* Get the two words comprising each integer constant. */
3093 if (GET_CODE (trueop0) == CONST_DOUBLE)
3095 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3096 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3098 else
3100 l0u = l0s = INTVAL (trueop0);
3101 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3104 if (GET_CODE (trueop1) == CONST_DOUBLE)
3106 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3107 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3109 else
3111 l1u = l1s = INTVAL (trueop1);
3112 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3115 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3116 we have to sign or zero-extend the values. */
3117 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3119 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3120 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3122 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3123 l0s |= ((HOST_WIDE_INT) (-1) << width);
3125 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3126 l1s |= ((HOST_WIDE_INT) (-1) << width);
3128 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3129 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3131 equal = (h0u == h1u && l0u == l1u);
3132 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3133 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3134 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3135 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3138 /* Otherwise, there are some code-specific tests we can make. */
3139 else
3141 /* Optimize comparisons with upper and lower bounds. */
3142 if (SCALAR_INT_MODE_P (mode)
3143 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3145 rtx mmin, mmax;
3146 int sign;
3148 if (code == GEU
3149 || code == LEU
3150 || code == GTU
3151 || code == LTU)
3152 sign = 0;
3153 else
3154 sign = 1;
3156 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3158 tem = NULL_RTX;
3159 switch (code)
3161 case GEU:
3162 case GE:
3163 /* x >= min is always true. */
3164 if (rtx_equal_p (trueop1, mmin))
3165 tem = const_true_rtx;
3166 else
3167 break;
3169 case LEU:
3170 case LE:
3171 /* x <= max is always true. */
3172 if (rtx_equal_p (trueop1, mmax))
3173 tem = const_true_rtx;
3174 break;
3176 case GTU:
3177 case GT:
3178 /* x > max is always false. */
3179 if (rtx_equal_p (trueop1, mmax))
3180 tem = const0_rtx;
3181 break;
3183 case LTU:
3184 case LT:
3185 /* x < min is always false. */
3186 if (rtx_equal_p (trueop1, mmin))
3187 tem = const0_rtx;
3188 break;
3190 default:
3191 break;
3193 if (tem == const0_rtx
3194 || tem == const_true_rtx)
3195 return tem;
3198 switch (code)
3200 case EQ:
3201 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3202 return const0_rtx;
3203 break;
3205 case NE:
3206 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3207 return const_true_rtx;
3208 break;
3210 case LT:
3211 /* Optimize abs(x) < 0.0. */
3212 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3214 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3215 : trueop0;
3216 if (GET_CODE (tem) == ABS)
3217 return const0_rtx;
3219 break;
3221 case GE:
3222 /* Optimize abs(x) >= 0.0. */
3223 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3225 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3226 : trueop0;
3227 if (GET_CODE (tem) == ABS)
3228 return const_true_rtx;
3230 break;
3232 case UNGE:
3233 /* Optimize ! (abs(x) < 0.0). */
3234 if (trueop1 == CONST0_RTX (mode))
3236 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3237 : trueop0;
3238 if (GET_CODE (tem) == ABS)
3239 return const_true_rtx;
3241 break;
3243 default:
3244 break;
3247 return 0;
3250 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3251 as appropriate. */
3252 switch (code)
3254 case EQ:
3255 case UNEQ:
3256 return equal ? const_true_rtx : const0_rtx;
3257 case NE:
3258 case LTGT:
3259 return ! equal ? const_true_rtx : const0_rtx;
3260 case LT:
3261 case UNLT:
3262 return op0lt ? const_true_rtx : const0_rtx;
3263 case GT:
3264 case UNGT:
3265 return op1lt ? const_true_rtx : const0_rtx;
3266 case LTU:
3267 return op0ltu ? const_true_rtx : const0_rtx;
3268 case GTU:
3269 return op1ltu ? const_true_rtx : const0_rtx;
3270 case LE:
3271 case UNLE:
3272 return equal || op0lt ? const_true_rtx : const0_rtx;
3273 case GE:
3274 case UNGE:
3275 return equal || op1lt ? const_true_rtx : const0_rtx;
3276 case LEU:
3277 return equal || op0ltu ? const_true_rtx : const0_rtx;
3278 case GEU:
3279 return equal || op1ltu ? const_true_rtx : const0_rtx;
3280 case ORDERED:
3281 return const_true_rtx;
3282 case UNORDERED:
3283 return const0_rtx;
3284 default:
3285 gcc_unreachable ();
3289 /* Simplify CODE, an operation with result mode MODE and three operands,
3290 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3291 a constant. Return 0 if no simplification is possible. */
3294 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3295 enum machine_mode op0_mode, rtx op0, rtx op1,
3296 rtx op2)
3298 unsigned int width = GET_MODE_BITSIZE (mode);
3300 /* VOIDmode means "infinite" precision. */
3301 if (width == 0)
3302 width = HOST_BITS_PER_WIDE_INT;
3304 switch (code)
3306 case SIGN_EXTRACT:
3307 case ZERO_EXTRACT:
3308 if (GET_CODE (op0) == CONST_INT
3309 && GET_CODE (op1) == CONST_INT
3310 && GET_CODE (op2) == CONST_INT
3311 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3312 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3314 /* Extracting a bit-field from a constant */
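	 /* Example, assuming !BITS_BIG_ENDIAN: extracting 4 bits starting
	    at bit 4 from (const_int 0xab) gives 0xa for ZERO_EXTRACT, and
	    -6 for SIGN_EXTRACT since the top bit of the 4-bit field is
	    set.  */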
3315 HOST_WIDE_INT val = INTVAL (op0);
3317 if (BITS_BIG_ENDIAN)
3318 val >>= (GET_MODE_BITSIZE (op0_mode)
3319 - INTVAL (op2) - INTVAL (op1));
3320 else
3321 val >>= INTVAL (op2);
3323 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3325 /* First zero-extend. */
3326 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3327 /* If desired, propagate sign bit. */
3328 if (code == SIGN_EXTRACT
3329 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3330 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3333 /* Clear the bits that don't belong in our mode,
3334 unless they and our sign bit are all one.
3335 So we get either a reasonable negative value or a reasonable
3336 unsigned value for this mode. */
3337 if (width < HOST_BITS_PER_WIDE_INT
3338 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3339 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3340 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3342 return gen_int_mode (val, mode);
3344 break;
3346 case IF_THEN_ELSE:
3347 if (GET_CODE (op0) == CONST_INT)
3348 return op0 != const0_rtx ? op1 : op2;
3350 /* Convert c ? a : a into "a". */
3351 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3352 return op1;
3354 /* Convert a != b ? a : b into "a". */
3355 if (GET_CODE (op0) == NE
3356 && ! side_effects_p (op0)
3357 && ! HONOR_NANS (mode)
3358 && ! HONOR_SIGNED_ZEROS (mode)
3359 && ((rtx_equal_p (XEXP (op0, 0), op1)
3360 && rtx_equal_p (XEXP (op0, 1), op2))
3361 || (rtx_equal_p (XEXP (op0, 0), op2)
3362 && rtx_equal_p (XEXP (op0, 1), op1))))
3363 return op1;
3365 /* Convert a == b ? a : b into "b". */
3366 if (GET_CODE (op0) == EQ
3367 && ! side_effects_p (op0)
3368 && ! HONOR_NANS (mode)
3369 && ! HONOR_SIGNED_ZEROS (mode)
3370 && ((rtx_equal_p (XEXP (op0, 0), op1)
3371 && rtx_equal_p (XEXP (op0, 1), op2))
3372 || (rtx_equal_p (XEXP (op0, 0), op2)
3373 && rtx_equal_p (XEXP (op0, 1), op1))))
3374 return op2;
3376 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3378 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3379 ? GET_MODE (XEXP (op0, 1))
3380 : GET_MODE (XEXP (op0, 0)));
3381 rtx temp;
3383 /* Look for happy constants in op1 and op2. */
3384 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3386 HOST_WIDE_INT t = INTVAL (op1);
3387 HOST_WIDE_INT f = INTVAL (op2);
3389 if (t == STORE_FLAG_VALUE && f == 0)
3390 code = GET_CODE (op0);
3391 else if (t == 0 && f == STORE_FLAG_VALUE)
3393 enum rtx_code tmp;
3394 tmp = reversed_comparison_code (op0, NULL_RTX);
3395 if (tmp == UNKNOWN)
3396 break;
3397 code = tmp;
3399 else
3400 break;
3402 return simplify_gen_relational (code, mode, cmp_mode,
3403 XEXP (op0, 0), XEXP (op0, 1));
3406 if (cmp_mode == VOIDmode)
3407 cmp_mode = op0_mode;
3408 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3409 cmp_mode, XEXP (op0, 0),
3410 XEXP (op0, 1));
3412 /* See if any simplifications were possible. */
3413 if (temp)
3415 if (GET_CODE (temp) == CONST_INT)
3416 return temp == const0_rtx ? op2 : op1;
3417 else if (temp)
3418 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3421 break;
3423 case VEC_MERGE:
3424 gcc_assert (GET_MODE (op0) == mode);
3425 gcc_assert (GET_MODE (op1) == mode);
3426 gcc_assert (VECTOR_MODE_P (mode));
3427 op2 = avoid_constant_pool_reference (op2);
3428 if (GET_CODE (op2) == CONST_INT)
3430 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3431 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3432 int mask = (1 << n_elts) - 1;
3434 if (!(INTVAL (op2) & mask))
3435 return op1;
3436 if ((INTVAL (op2) & mask) == mask)
3437 return op0;
3439 op0 = avoid_constant_pool_reference (op0);
3440 op1 = avoid_constant_pool_reference (op1);
3441 if (GET_CODE (op0) == CONST_VECTOR
3442 && GET_CODE (op1) == CONST_VECTOR)
3444 rtvec v = rtvec_alloc (n_elts);
3445 unsigned int i;
3447 for (i = 0; i < n_elts; i++)
3448 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3449 ? CONST_VECTOR_ELT (op0, i)
3450 : CONST_VECTOR_ELT (op1, i));
3451 return gen_rtx_CONST_VECTOR (mode, v);
3454 break;
3456 default:
3457 gcc_unreachable ();
3460 return 0;
3463 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3464 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3466 Works by unpacking OP into a collection of 8-bit values
3467 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3468 and then repacking them again for OUTERMODE. */
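/* For example, on a little-endian target (subreg:QI (const_int 0x12345678) 0)
   with SImode as INNERMODE unpacks to the byte array {0x78, 0x56, 0x34, 0x12}
   and repacks byte 0 as (const_int 0x78).  */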
3470 static rtx
3471 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3472 enum machine_mode innermode, unsigned int byte)
3474 /* We support up to 512-bit values (for V8DFmode). */
3475 enum {
3476 max_bitsize = 512,
3477 value_bit = 8,
3478 value_mask = (1 << value_bit) - 1
3480 unsigned char value[max_bitsize / value_bit];
3481 int value_start;
3482 int i;
3483 int elem;
3485 int num_elem;
3486 rtx * elems;
3487 int elem_bitsize;
3488 rtx result_s;
3489 rtvec result_v = NULL;
3490 enum mode_class outer_class;
3491 enum machine_mode outer_submode;
3493 /* Some ports misuse CCmode. */
3494 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3495 return op;
3497 /* We have no way to represent a complex constant at the rtl level. */
3498 if (COMPLEX_MODE_P (outermode))
3499 return NULL_RTX;
3501 /* Unpack the value. */
3503 if (GET_CODE (op) == CONST_VECTOR)
3505 num_elem = CONST_VECTOR_NUNITS (op);
3506 elems = &CONST_VECTOR_ELT (op, 0);
3507 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3509 else
3511 num_elem = 1;
3512 elems = &op;
3513 elem_bitsize = max_bitsize;
3515 /* If this asserts, it is too complicated; reducing value_bit may help. */
3516 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3517 /* I don't know how to handle endianness of sub-units. */
3518 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3520 for (elem = 0; elem < num_elem; elem++)
3522 unsigned char * vp;
3523 rtx el = elems[elem];
3525 /* Vectors are kept in target memory order. (This is probably
3526 a mistake.) */
3528 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3529 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3530 / BITS_PER_UNIT);
3531 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3532 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3533 unsigned bytele = (subword_byte % UNITS_PER_WORD
3534 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3535 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3538 switch (GET_CODE (el))
3540 case CONST_INT:
3541 for (i = 0;
3542 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3543 i += value_bit)
3544 *vp++ = INTVAL (el) >> i;
3545 /* CONST_INTs are always logically sign-extended. */
3546 for (; i < elem_bitsize; i += value_bit)
3547 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3548 break;
3550 case CONST_DOUBLE:
3551 if (GET_MODE (el) == VOIDmode)
3553 /* If this triggers, someone should have generated a
3554 CONST_INT instead. */
3555 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3557 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3558 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3559 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3561 *vp++
3562 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3563 i += value_bit;
3565 /* It shouldn't matter what's done here, so fill it with
3566 zero. */
3567 for (; i < max_bitsize; i += value_bit)
3568 *vp++ = 0;
3570 else
3572 long tmp[max_bitsize / 32];
3573 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3575 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3576 gcc_assert (bitsize <= elem_bitsize);
3577 gcc_assert (bitsize % value_bit == 0);
3579 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3580 GET_MODE (el));
3582 /* real_to_target produces its result in words affected by
3583 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3584 and use WORDS_BIG_ENDIAN instead; see the documentation
3585 of SUBREG in rtl.texi. */
3586 for (i = 0; i < bitsize; i += value_bit)
3588 int ibase;
3589 if (WORDS_BIG_ENDIAN)
3590 ibase = bitsize - 1 - i;
3591 else
3592 ibase = i;
3593 *vp++ = tmp[ibase / 32] >> i % 32;
3596 /* It shouldn't matter what's done here, so fill it with
3597 zero. */
3598 for (; i < elem_bitsize; i += value_bit)
3599 *vp++ = 0;
3601 break;
3603 default:
3604 gcc_unreachable ();
3608 /* Now, pick the right byte to start with. */
3609 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3610 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3611 will already have offset 0. */
3612 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3614 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3615 - byte);
3616 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3617 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3618 byte = (subword_byte % UNITS_PER_WORD
3619 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3622 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3623 so if it's become negative it will instead be very large.) */
3624 gcc_assert (byte < GET_MODE_SIZE (innermode));
3626 /* Convert from bytes to chunks of size value_bit. */
3627 value_start = byte * (BITS_PER_UNIT / value_bit);
3629 /* Re-pack the value. */
3631 if (VECTOR_MODE_P (outermode))
3633 num_elem = GET_MODE_NUNITS (outermode);
3634 result_v = rtvec_alloc (num_elem);
3635 elems = &RTVEC_ELT (result_v, 0);
3636 outer_submode = GET_MODE_INNER (outermode);
3638 else
3640 num_elem = 1;
3641 elems = &result_s;
3642 outer_submode = outermode;
3645 outer_class = GET_MODE_CLASS (outer_submode);
3646 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3648 gcc_assert (elem_bitsize % value_bit == 0);
3649 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3651 for (elem = 0; elem < num_elem; elem++)
3653 unsigned char *vp;
3655 /* Vectors are stored in target memory order. (This is probably
3656 a mistake.) */
3658 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3659 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3660 / BITS_PER_UNIT);
3661 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3662 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3663 unsigned bytele = (subword_byte % UNITS_PER_WORD
3664 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3665 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3668 switch (outer_class)
3670 case MODE_INT:
3671 case MODE_PARTIAL_INT:
3673 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3675 for (i = 0;
3676 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3677 i += value_bit)
3678 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3679 for (; i < elem_bitsize; i += value_bit)
3680 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3681 << (i - HOST_BITS_PER_WIDE_INT));
3683 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3684 know why. */
3685 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3686 elems[elem] = gen_int_mode (lo, outer_submode);
3687 else
3688 elems[elem] = immed_double_const (lo, hi, outer_submode);
3690 break;
3692 case MODE_FLOAT:
3694 REAL_VALUE_TYPE r;
3695 long tmp[max_bitsize / 32];
3697 /* real_from_target wants its input in words affected by
3698 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3699 and use WORDS_BIG_ENDIAN instead; see the documentation
3700 of SUBREG in rtl.texi. */
3701 for (i = 0; i < max_bitsize / 32; i++)
3702 tmp[i] = 0;
3703 for (i = 0; i < elem_bitsize; i += value_bit)
3705 int ibase;
3706 if (WORDS_BIG_ENDIAN)
3707 ibase = elem_bitsize - 1 - i;
3708 else
3709 ibase = i;
3710 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3713 real_from_target (&r, tmp, outer_submode);
3714 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3716 break;
3718 default:
3719 gcc_unreachable ();
3722 if (VECTOR_MODE_P (outermode))
3723 return gen_rtx_CONST_VECTOR (outermode, result_v);
3724 else
3725 return result_s;
3728 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3729 Return 0 if no simplifications are possible. */
3731 simplify_subreg (enum machine_mode outermode, rtx op,
3732 enum machine_mode innermode, unsigned int byte)
3734 /* Little bit of sanity checking. */
3735 gcc_assert (innermode != VOIDmode);
3736 gcc_assert (outermode != VOIDmode);
3737 gcc_assert (innermode != BLKmode);
3738 gcc_assert (outermode != BLKmode);
3740 gcc_assert (GET_MODE (op) == innermode
3741 || GET_MODE (op) == VOIDmode);
3743 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3744 gcc_assert (byte < GET_MODE_SIZE (innermode));
3746 if (outermode == innermode && !byte)
3747 return op;
3749 if (GET_CODE (op) == CONST_INT
3750 || GET_CODE (op) == CONST_DOUBLE
3751 || GET_CODE (op) == CONST_VECTOR)
3752 return simplify_immed_subreg (outermode, op, innermode, byte);
3754 /* Changing mode twice with SUBREG => just change it once,
3755 or not at all if changing back to op's starting mode. */
3756 if (GET_CODE (op) == SUBREG)
3758 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3759 int final_offset = byte + SUBREG_BYTE (op);
3760 rtx newx;
3762 if (outermode == innermostmode
3763 && byte == 0 && SUBREG_BYTE (op) == 0)
3764 return SUBREG_REG (op);
3766 /* The SUBREG_BYTE represents the offset, as if the value were stored
3767 in memory. An irritating exception is a paradoxical subreg, where
3768 we define SUBREG_BYTE to be 0. On big endian machines, this
3769 value should be negative. For a moment, undo this exception. */
3770 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3772 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3773 if (WORDS_BIG_ENDIAN)
3774 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3775 if (BYTES_BIG_ENDIAN)
3776 final_offset += difference % UNITS_PER_WORD;
3778 if (SUBREG_BYTE (op) == 0
3779 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3781 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3782 if (WORDS_BIG_ENDIAN)
3783 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3784 if (BYTES_BIG_ENDIAN)
3785 final_offset += difference % UNITS_PER_WORD;
3788 /* See whether resulting subreg will be paradoxical. */
3789 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3791 /* In nonparadoxical subregs we can't handle negative offsets. */
3792 if (final_offset < 0)
3793 return NULL_RTX;
3794 /* Bail out in case resulting subreg would be incorrect. */
3795 if (final_offset % GET_MODE_SIZE (outermode)
3796 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3797 return NULL_RTX;
3799 else
3801 int offset = 0;
3802 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3804 /* In a paradoxical subreg, see if we are still looking at the lower part.
3805 If so, our SUBREG_BYTE will be 0. */
3806 if (WORDS_BIG_ENDIAN)
3807 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3808 if (BYTES_BIG_ENDIAN)
3809 offset += difference % UNITS_PER_WORD;
3810 if (offset == final_offset)
3811 final_offset = 0;
3812 else
3813 return NULL_RTX;
3816 /* Recurse for further possible simplifications. */
3817 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3818 final_offset);
3819 if (newx)
3820 return newx;
3821 if (validate_subreg (outermode, innermostmode,
3822 SUBREG_REG (op), final_offset))
3823 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3824 return NULL_RTX;
3827 /* SUBREG of a hard register => just change the register number
3828 and/or mode. If the hard register is not valid in that mode,
3829 suppress this simplification. If the hard register is the stack,
3830 frame, or argument pointer, leave this as a SUBREG. */
3832 if (REG_P (op)
3833 && REGNO (op) < FIRST_PSEUDO_REGISTER
3834 #ifdef CANNOT_CHANGE_MODE_CLASS
3835 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3836 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3837 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3838 #endif
3839 && ((reload_completed && !frame_pointer_needed)
3840 || (REGNO (op) != FRAME_POINTER_REGNUM
3841 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3842 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3843 #endif
3845 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3846 && REGNO (op) != ARG_POINTER_REGNUM
3847 #endif
3848 && REGNO (op) != STACK_POINTER_REGNUM
3849 && subreg_offset_representable_p (REGNO (op), innermode,
3850 byte, outermode))
3852 unsigned int regno = REGNO (op);
3853 unsigned int final_regno
3854 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3856 /* ??? We do allow it if the current REG is not valid for
3857 its mode. This is a kludge to work around how float/complex
3858 arguments are passed on 32-bit SPARC and should be fixed. */
3859 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3860 || ! HARD_REGNO_MODE_OK (regno, innermode))
3862 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3864 /* Propagate original regno. We don't have any way to specify
3865 the offset inside original regno, so do so only for lowpart.
3866 The information is used only by alias analysis, which cannot
3867 grok partial registers anyway. */
3869 if (subreg_lowpart_offset (outermode, innermode) == byte)
3870 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3871 return x;
3875 /* If we have a SUBREG of a register that we are replacing and we are
3876 replacing it with a MEM, make a new MEM and try replacing the
3877 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3878 or if we would be widening it. */
3880 if (MEM_P (op)
3881 && ! mode_dependent_address_p (XEXP (op, 0))
3882 /* Allow splitting of volatile memory references in case we don't
3883 have an instruction to move the whole thing. */
3884 && (! MEM_VOLATILE_P (op)
3885 || ! have_insn_for (SET, innermode))
3886 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3887 return adjust_address_nv (op, outermode, byte);
3889 /* Handle complex values represented as CONCAT
3890 of real and imaginary part. */
3891 if (GET_CODE (op) == CONCAT)
3893 unsigned int inner_size, final_offset;
3894 rtx part, res;
3896 inner_size = GET_MODE_UNIT_SIZE (innermode);
3897 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3898 final_offset = byte % inner_size;
3899 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3900 return NULL_RTX;
3902 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3903 if (res)
3904 return res;
3905 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3906 return gen_rtx_SUBREG (outermode, part, final_offset);
3907 return NULL_RTX;
3910 /* Optimize SUBREG truncations of zero and sign extended values. */
3911 if ((GET_CODE (op) == ZERO_EXTEND
3912 || GET_CODE (op) == SIGN_EXTEND)
3913 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3915 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3917 /* If we're requesting the lowpart of a zero or sign extension,
3918 there are three possibilities. If the outermode is the same
3919 as the origmode, we can omit both the extension and the subreg.
3920 If the outermode is not larger than the origmode, we can apply
3921 the truncation without the extension. Finally, if the outermode
3922 is larger than the origmode, but both are integer modes, we
3923 can just extend to the appropriate mode. */
3924 if (bitpos == 0)
3926 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3927 if (outermode == origmode)
3928 return XEXP (op, 0);
3929 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3930 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3931 subreg_lowpart_offset (outermode,
3932 origmode));
3933 if (SCALAR_INT_MODE_P (outermode))
3934 return simplify_gen_unary (GET_CODE (op), outermode,
3935 XEXP (op, 0), origmode);
3938 /* A SUBREG resulting from a zero extension may fold to zero if
3939 it extracts higher bits than the ZERO_EXTEND's source provides. */
3940 if (GET_CODE (op) == ZERO_EXTEND
3941 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3942 return CONST0_RTX (outermode);
3945 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
3946 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3947 the outer subreg is effectively a truncation to the original mode. */
3948 if ((GET_CODE (op) == LSHIFTRT
3949 || GET_CODE (op) == ASHIFTRT)
3950 && SCALAR_INT_MODE_P (outermode)
3951 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
3952 to avoid the possibility that an outer LSHIFTRT shifts by more
3953 than the sign extension's sign_bit_copies and introduces zeros
3954 into the high bits of the result. */
3955 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3956 && GET_CODE (XEXP (op, 1)) == CONST_INT
3957 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3958 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3959 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3960 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3961 return simplify_gen_binary (ASHIFTRT, outermode,
3962 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
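/* For instance, on a little-endian target,
   (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)) 0)
   becomes (ashiftrt:QI (reg:QI x) (const_int 2)).  */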
3964 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
3965 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3966 the outer subreg is effectively a truncation to the original mode. */
3967 if ((GET_CODE (op) == LSHIFTRT
3968 || GET_CODE (op) == ASHIFTRT)
3969 && SCALAR_INT_MODE_P (outermode)
3970 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3971 && GET_CODE (XEXP (op, 1)) == CONST_INT
3972 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3973 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3974 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3975 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3976 return simplify_gen_binary (LSHIFTRT, outermode,
3977 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
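/* Likewise (subreg:HI (lshiftrt:SI (zero_extend:SI (reg:HI x)) (const_int 3)) 0)
   becomes (lshiftrt:HI (reg:HI x) (const_int 3)) on a little-endian target.  */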
3979 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
3980 to (ashift:QI (x:QI) C), where C is a suitable small constant and
3981 the outer subreg is effectively a truncation to the original mode. */
3982 if (GET_CODE (op) == ASHIFT
3983 && SCALAR_INT_MODE_P (outermode)
3984 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3985 && GET_CODE (XEXP (op, 1)) == CONST_INT
3986 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3987 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3988 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3989 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3990 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3991 return simplify_gen_binary (ASHIFT, outermode,
3992 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
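/* And (subreg:QI (ashift:SI (zero_extend:SI (reg:QI x)) (const_int 3)) 0)
   becomes (ashift:QI (reg:QI x) (const_int 3)) under the same conditions.  */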
3994 return NULL_RTX;
3997 /* Make a SUBREG operation or equivalent if it folds. */
4000 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4001 enum machine_mode innermode, unsigned int byte)
4003 rtx newx;
4005 newx = simplify_subreg (outermode, op, innermode, byte);
4006 if (newx)
4007 return newx;
4009 if (GET_CODE (op) == SUBREG
4010 || GET_CODE (op) == CONCAT
4011 || GET_MODE (op) == VOIDmode)
4012 return NULL_RTX;
4014 if (validate_subreg (outermode, innermode, op, byte))
4015 return gen_rtx_SUBREG (outermode, op, byte);
4017 return NULL_RTX;
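/* For example, simplify_gen_subreg (HImode, (reg:SI r), SImode, 0)
   returns whatever simplify_subreg folds the expression to, and
   otherwise the explicit lowpart (subreg:HI (reg:SI r) 0).  */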
4020 /* Simplify X, an rtx expression.
4022 Return the simplified expression or NULL if no simplifications
4023 were possible.
4025 This is the preferred entry point into the simplification routines;
4026 however, we still allow passes to call the more specific routines.
4028 Right now GCC has three (yes, three) major bodies of RTL simplification
4029 code that need to be unified.
4031 1. fold_rtx in cse.c. This code uses various CSE specific
4032 information to aid in RTL simplification.
4034 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4035 it uses combine specific information to aid in RTL
4036 simplification.
4038 3. The routines in this file.
4041 Long term we want to have only one body of simplification code; to
4042 get to that state I recommend the following steps:
4044 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4045 which do not depend on pass-specific state into these routines.
4047 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4048 use this routine whenever possible.
4050 3. Allow for pass dependent state to be provided to these
4051 routines and add simplifications based on the pass dependent
4052 state. Remove code from cse.c & combine.c that becomes
4053 redundant/dead.
4055 It will take time, but ultimately the compiler will be easier to
4056 maintain and improve. It's totally silly that when we add a
4057 simplification it needs to be added to 4 places (3 for RTL
4058 simplification and 1 for tree simplification). */
4061 simplify_rtx (rtx x)
4063 enum rtx_code code = GET_CODE (x);
4064 enum machine_mode mode = GET_MODE (x);
4066 switch (GET_RTX_CLASS (code))
4068 case RTX_UNARY:
4069 return simplify_unary_operation (code, mode,
4070 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4071 case RTX_COMM_ARITH:
4072 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4073 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4075 /* Fall through.... */
4077 case RTX_BIN_ARITH:
4078 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4080 case RTX_TERNARY:
4081 case RTX_BITFIELD_OPS:
4082 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4083 XEXP (x, 0), XEXP (x, 1),
4084 XEXP (x, 2));
4086 case RTX_COMPARE:
4087 case RTX_COMM_COMPARE:
4088 return simplify_relational_operation (code, mode,
4089 ((GET_MODE (XEXP (x, 0))
4090 != VOIDmode)
4091 ? GET_MODE (XEXP (x, 0))
4092 : GET_MODE (XEXP (x, 1))),
4093 XEXP (x, 0),
4094 XEXP (x, 1));
4096 case RTX_EXTRA:
4097 if (code == SUBREG)
4098 return simplify_gen_subreg (mode, SUBREG_REG (x),
4099 GET_MODE (SUBREG_REG (x)),
4100 SUBREG_BYTE (x));
4101 break;
4103 case RTX_OBJ:
4104 if (code == LO_SUM)
4106 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4107 if (GET_CODE (XEXP (x, 0)) == HIGH
4108 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4109 return XEXP (x, 1);
4111 break;
4113 default:
4114 break;
4116 return NULL;