[official-gcc.git] / gcc / simplify-rtx.c  (merge from mainline, gomp-merge-2005-02-26)
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
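/* Illustrative example (not part of the original source): on a host with
   32-bit HOST_WIDE_INT, the double-word value -5 is held as low = 0xfffffffb,
   and HWI_SIGN_EXTEND (low) supplies (HOST_WIDE_INT) -1 for the high word,
   whereas a non-negative low word such as 5 yields a high word of 0.  */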
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, rtx i)
69 return gen_int_mode (- INTVAL (i), mode);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (enum machine_mode mode, rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_BITSIZE (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && GET_CODE (x) == CONST_INT)
90 val = INTVAL (x);
91 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_DOUBLE
93 && CONST_DOUBLE_LOW (x) == 0)
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
98 else
99 return false;
101 if (width < HOST_BITS_PER_WIDE_INT)
102 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
103 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
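/* Worked example (not part of the original source): on a host with 64-bit
   HOST_WIDE_INT, mode_signbit_p (SImode, x) holds only when x is the
   CONST_INT whose low 32 bits are 0x80000000, i.e. the SImode sign bit.  */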
106 /* Make a binary operation by properly ordering the operands and
107 seeing if the expression folds. */
110 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
111 rtx op1)
113 rtx tem;
115 /* Put complex operands first and constants second if commutative. */
116 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
117 && swap_commutative_operands_p (op0, op1))
118 tem = op0, op0 = op1, op1 = tem;
120 /* If this simplifies, do it. */
121 tem = simplify_binary_operation (code, mode, op0, op1);
122 if (tem)
123 return tem;
125 /* Handle addition and subtraction specially. Otherwise, just form
126 the operation. */
128 if (code == PLUS || code == MINUS)
130 tem = simplify_plus_minus (code, mode, op0, op1, 1);
131 if (tem)
132 return tem;
135 return gen_rtx_fmt_ee (code, mode, op0, op1);
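/* Usage sketch (not part of the original source): simplify_gen_binary
   (PLUS, SImode, const1_rtx, reg) first swaps the operands so the constant
   comes second and builds (plus:SI reg (const_int 1)), while
   simplify_gen_binary (PLUS, SImode, reg, const0_rtx) folds away entirely
   and simply returns reg.  */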
138 /* If X is a MEM referencing the constant pool, return the real value.
139 Otherwise return X. */
141 avoid_constant_pool_reference (rtx x)
143 rtx c, tmp, addr;
144 enum machine_mode cmode;
146 switch (GET_CODE (x))
148 case MEM:
149 break;
151 case FLOAT_EXTEND:
152 /* Handle float extensions of constant pool references. */
153 tmp = XEXP (x, 0);
154 c = avoid_constant_pool_reference (tmp);
155 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
157 REAL_VALUE_TYPE d;
159 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
160 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
162 return x;
164 default:
165 return x;
168 addr = XEXP (x, 0);
170 /* Call target hook to avoid the effects of -fpic etc.... */
171 addr = targetm.delegitimize_address (addr);
173 if (GET_CODE (addr) == LO_SUM)
174 addr = XEXP (addr, 1);
176 if (GET_CODE (addr) != SYMBOL_REF
177 || ! CONSTANT_POOL_ADDRESS_P (addr))
178 return x;
180 c = get_pool_constant (addr);
181 cmode = get_pool_mode (addr);
183 /* If we're accessing the constant in a different mode than it was
184 originally stored, attempt to fix that up via subreg simplifications.
185 If that fails we have no choice but to return the original memory. */
186 if (cmode != GET_MODE (x))
188 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
189 return c ? c : x;
192 return c;
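/* Example (not part of the original source): given a DFmode MEM whose
   address is a SYMBOL_REF into the constant pool holding 1.0, this returns
   the pooled CONST_DOUBLE for 1.0 instead of the MEM, going through
   simplify_subreg when the access mode differs from the pooled mode.  */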
195 /* Make a unary operation by first seeing if it folds and otherwise making
196 the specified operation. */
199 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
200 enum machine_mode op_mode)
202 rtx tem;
204 /* If this simplifies, use it. */
205 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
206 return tem;
208 return gen_rtx_fmt_e (code, mode, op);
211 /* Likewise for ternary operations. */
214 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
215 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
217 rtx tem;
219 /* If this simplifies, use it. */
220 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
221 op0, op1, op2)))
222 return tem;
224 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
227 /* Likewise, for relational operations.
228 CMP_MODE specifies mode comparison is done in. */
231 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
232 enum machine_mode cmp_mode, rtx op0, rtx op1)
234 rtx tem;
236 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
237 op0, op1)))
238 return tem;
240 return gen_rtx_fmt_ee (code, mode, op0, op1);
243 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
244 resulting RTX. Return a new RTX which is as simplified as possible. */
247 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
249 enum rtx_code code = GET_CODE (x);
250 enum machine_mode mode = GET_MODE (x);
251 enum machine_mode op_mode;
252 rtx op0, op1, op2;
254 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
255 to build a new expression substituting recursively. If we can't do
256 anything, return our input. */
258 if (x == old_rtx)
259 return new_rtx;
261 switch (GET_RTX_CLASS (code))
263 case RTX_UNARY:
264 op0 = XEXP (x, 0);
265 op_mode = GET_MODE (op0);
266 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
267 if (op0 == XEXP (x, 0))
268 return x;
269 return simplify_gen_unary (code, mode, op0, op_mode);
271 case RTX_BIN_ARITH:
272 case RTX_COMM_ARITH:
273 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
274 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
275 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
276 return x;
277 return simplify_gen_binary (code, mode, op0, op1);
279 case RTX_COMPARE:
280 case RTX_COMM_COMPARE:
281 op0 = XEXP (x, 0);
282 op1 = XEXP (x, 1);
283 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
284 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
285 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return x;
288 return simplify_gen_relational (code, mode, op_mode, op0, op1);
290 case RTX_TERNARY:
291 case RTX_BITFIELD_OPS:
292 op0 = XEXP (x, 0);
293 op_mode = GET_MODE (op0);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
296 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
297 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
298 return x;
299 if (op_mode == VOIDmode)
300 op_mode = GET_MODE (op0);
301 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
303 case RTX_EXTRA:
304 /* The only case we try to handle is a SUBREG. */
305 if (code == SUBREG)
307 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
308 if (op0 == SUBREG_REG (x))
309 return x;
310 op0 = simplify_gen_subreg (GET_MODE (x), op0,
311 GET_MODE (SUBREG_REG (x)),
312 SUBREG_BYTE (x));
313 return op0 ? op0 : x;
315 break;
317 case RTX_OBJ:
318 if (code == MEM)
320 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
321 if (op0 == XEXP (x, 0))
322 return x;
323 return replace_equiv_address_nv (x, op0);
325 else if (code == LO_SUM)
327 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
328 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
330 /* (lo_sum (high x) x) -> x */
331 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
332 return op1;
334 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
335 return x;
336 return gen_rtx_LO_SUM (mode, op0, op1);
338 else if (code == REG)
340 if (rtx_equal_p (x, old_rtx))
341 return new_rtx;
343 break;
345 default:
346 break;
348 return x;
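/* Example (not part of the original source): replacing (reg A) with
   const0_rtx in (plus:SI (reg A) (reg B)) substitutes recursively and then
   lets simplify_gen_binary fold the result, so the whole call returns
   (reg B).  */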
351 /* Try to simplify a unary operation CODE whose output mode is to be
352 MODE with input operand OP whose mode was originally OP_MODE.
353 Return zero if no simplification can be made. */
355 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
356 rtx op, enum machine_mode op_mode)
358 unsigned int width = GET_MODE_BITSIZE (mode);
359 rtx trueop = avoid_constant_pool_reference (op);
361 if (code == VEC_DUPLICATE)
363 gcc_assert (VECTOR_MODE_P (mode));
364 if (GET_MODE (trueop) != VOIDmode)
366 if (!VECTOR_MODE_P (GET_MODE (trueop)))
367 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
368 else
369 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
370 (GET_MODE (trueop)));
372 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
373 || GET_CODE (trueop) == CONST_VECTOR)
375 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
376 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
377 rtvec v = rtvec_alloc (n_elts);
378 unsigned int i;
380 if (GET_CODE (trueop) != CONST_VECTOR)
381 for (i = 0; i < n_elts; i++)
382 RTVEC_ELT (v, i) = trueop;
383 else
385 enum machine_mode inmode = GET_MODE (trueop);
386 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
387 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
389 gcc_assert (in_n_elts < n_elts);
390 gcc_assert ((n_elts % in_n_elts) == 0);
391 for (i = 0; i < n_elts; i++)
392 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
394 return gen_rtx_CONST_VECTOR (mode, v);
397 else if (GET_CODE (op) == CONST)
398 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
400 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
402 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
403 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
404 enum machine_mode opmode = GET_MODE (trueop);
405 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
406 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
407 rtvec v = rtvec_alloc (n_elts);
408 unsigned int i;
410 gcc_assert (op_n_elts == n_elts);
411 for (i = 0; i < n_elts; i++)
413 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
414 CONST_VECTOR_ELT (trueop, i),
415 GET_MODE_INNER (opmode));
416 if (!x)
417 return 0;
418 RTVEC_ELT (v, i) = x;
420 return gen_rtx_CONST_VECTOR (mode, v);
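/* Example (not part of the original source): a unary operation on a
   constant vector is folded element by element, e.g. (neg:V4SI
   (const_vector [1 2 3 4])) becomes (const_vector [-1 -2 -3 -4]).  */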
423 /* The order of these tests is critical so that, for example, we don't
424 check the wrong mode (input vs. output) for a conversion operation,
425 such as FIX. At some point, this should be simplified. */
427 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
428 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
430 HOST_WIDE_INT hv, lv;
431 REAL_VALUE_TYPE d;
433 if (GET_CODE (trueop) == CONST_INT)
434 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
435 else
436 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
438 REAL_VALUE_FROM_INT (d, lv, hv, mode);
439 d = real_value_truncate (mode, d);
440 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
442 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
443 && (GET_CODE (trueop) == CONST_DOUBLE
444 || GET_CODE (trueop) == CONST_INT))
446 HOST_WIDE_INT hv, lv;
447 REAL_VALUE_TYPE d;
449 if (GET_CODE (trueop) == CONST_INT)
450 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
451 else
452 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
454 if (op_mode == VOIDmode)
456 /* We don't know how to interpret negative-looking numbers in
457 this case, so don't try to fold those. */
458 if (hv < 0)
459 return 0;
461 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
463 else
464 hv = 0, lv &= GET_MODE_MASK (op_mode);
466 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
467 d = real_value_truncate (mode, d);
468 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
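/* Example (not part of the original source): (float:DF (const_int 3))
   folds through the code above to the DFmode CONST_DOUBLE for 3.0, and
   (unsigned_float:DF (const_int 3)) reaches the same value via the
   unsigned path.  */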
471 if (GET_CODE (trueop) == CONST_INT
472 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
474 HOST_WIDE_INT arg0 = INTVAL (trueop);
475 HOST_WIDE_INT val;
477 switch (code)
479 case NOT:
480 val = ~ arg0;
481 break;
483 case NEG:
484 val = - arg0;
485 break;
487 case ABS:
488 val = (arg0 >= 0 ? arg0 : - arg0);
489 break;
491 case FFS:
492 /* Don't use ffs here. Instead, get low order bit and then its
493 number. If arg0 is zero, this will return 0, as desired. */
494 arg0 &= GET_MODE_MASK (mode);
495 val = exact_log2 (arg0 & (- arg0)) + 1;
496 break;
498 case CLZ:
499 arg0 &= GET_MODE_MASK (mode);
500 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
502 else
503 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
504 break;
506 case CTZ:
507 arg0 &= GET_MODE_MASK (mode);
508 if (arg0 == 0)
510 /* Even if the value at zero is undefined, we have to come
511 up with some replacement. Seems good enough. */
512 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
513 val = GET_MODE_BITSIZE (mode);
515 else
516 val = exact_log2 (arg0 & -arg0);
517 break;
519 case POPCOUNT:
520 arg0 &= GET_MODE_MASK (mode);
521 val = 0;
522 while (arg0)
523 val++, arg0 &= arg0 - 1;
524 break;
526 case PARITY:
527 arg0 &= GET_MODE_MASK (mode);
528 val = 0;
529 while (arg0)
530 val++, arg0 &= arg0 - 1;
531 val &= 1;
532 break;
534 case TRUNCATE:
535 val = arg0;
536 break;
538 case ZERO_EXTEND:
539 /* When zero-extending a CONST_INT, we need to know its
540 original mode. */
541 gcc_assert (op_mode != VOIDmode);
542 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
544 /* If we were really extending the mode,
545 we would have to distinguish between zero-extension
546 and sign-extension. */
547 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
548 val = arg0;
550 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
551 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
552 else
553 return 0;
554 break;
556 case SIGN_EXTEND:
557 if (op_mode == VOIDmode)
558 op_mode = mode;
559 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
561 /* If we were really extending the mode,
562 we would have to distinguish between zero-extension
563 and sign-extension. */
564 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
565 val = arg0;
567 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
 570 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
571 if (val
572 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
573 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
575 else
576 return 0;
577 break;
579 case SQRT:
580 case FLOAT_EXTEND:
581 case FLOAT_TRUNCATE:
582 case SS_TRUNCATE:
583 case US_TRUNCATE:
584 return 0;
586 default:
587 gcc_unreachable ();
590 val = trunc_int_for_mode (val, mode);
592 return GEN_INT (val);
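/* Worked examples (not part of the original source): (ffs:SI (const_int 12))
   folds to (const_int 3) since the lowest set bit of 12 is bit 2, and
   (neg:SI (const_int 5)) folds to (const_int -5) after truncation to the
   mode.  */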
595 /* We can do some operations on integer CONST_DOUBLEs. Also allow
596 for a DImode operation on a CONST_INT. */
597 else if (GET_MODE (trueop) == VOIDmode
598 && width <= HOST_BITS_PER_WIDE_INT * 2
599 && (GET_CODE (trueop) == CONST_DOUBLE
600 || GET_CODE (trueop) == CONST_INT))
602 unsigned HOST_WIDE_INT l1, lv;
603 HOST_WIDE_INT h1, hv;
605 if (GET_CODE (trueop) == CONST_DOUBLE)
606 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
607 else
608 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
610 switch (code)
612 case NOT:
613 lv = ~ l1;
614 hv = ~ h1;
615 break;
617 case NEG:
618 neg_double (l1, h1, &lv, &hv);
619 break;
621 case ABS:
622 if (h1 < 0)
623 neg_double (l1, h1, &lv, &hv);
624 else
625 lv = l1, hv = h1;
626 break;
628 case FFS:
629 hv = 0;
630 if (l1 == 0)
632 if (h1 == 0)
633 lv = 0;
634 else
635 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
637 else
638 lv = exact_log2 (l1 & -l1) + 1;
639 break;
641 case CLZ:
642 hv = 0;
643 if (h1 != 0)
644 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
645 - HOST_BITS_PER_WIDE_INT;
646 else if (l1 != 0)
647 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
648 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
649 lv = GET_MODE_BITSIZE (mode);
650 break;
652 case CTZ:
653 hv = 0;
654 if (l1 != 0)
655 lv = exact_log2 (l1 & -l1);
656 else if (h1 != 0)
657 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
658 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
659 lv = GET_MODE_BITSIZE (mode);
660 break;
662 case POPCOUNT:
663 hv = 0;
664 lv = 0;
665 while (l1)
666 lv++, l1 &= l1 - 1;
667 while (h1)
668 lv++, h1 &= h1 - 1;
669 break;
671 case PARITY:
672 hv = 0;
673 lv = 0;
674 while (l1)
675 lv++, l1 &= l1 - 1;
676 while (h1)
677 lv++, h1 &= h1 - 1;
678 lv &= 1;
679 break;
681 case TRUNCATE:
682 /* This is just a change-of-mode, so do nothing. */
683 lv = l1, hv = h1;
684 break;
686 case ZERO_EXTEND:
687 gcc_assert (op_mode != VOIDmode);
689 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
690 return 0;
692 hv = 0;
693 lv = l1 & GET_MODE_MASK (op_mode);
694 break;
696 case SIGN_EXTEND:
697 if (op_mode == VOIDmode
698 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
699 return 0;
700 else
702 lv = l1 & GET_MODE_MASK (op_mode);
703 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
704 && (lv & ((HOST_WIDE_INT) 1
705 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
706 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
708 hv = HWI_SIGN_EXTEND (lv);
710 break;
712 case SQRT:
713 return 0;
715 default:
716 return 0;
719 return immed_double_const (lv, hv, mode);
722 else if (GET_CODE (trueop) == CONST_DOUBLE
723 && GET_MODE_CLASS (mode) == MODE_FLOAT)
725 REAL_VALUE_TYPE d, t;
726 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
728 switch (code)
730 case SQRT:
731 if (HONOR_SNANS (mode) && real_isnan (&d))
732 return 0;
733 real_sqrt (&t, mode, &d);
734 d = t;
735 break;
736 case ABS:
737 d = REAL_VALUE_ABS (d);
738 break;
739 case NEG:
740 d = REAL_VALUE_NEGATE (d);
741 break;
742 case FLOAT_TRUNCATE:
743 d = real_value_truncate (mode, d);
744 break;
745 case FLOAT_EXTEND:
746 /* All this does is change the mode. */
747 break;
748 case FIX:
749 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
750 break;
751 case NOT:
753 long tmp[4];
754 int i;
756 real_to_target (tmp, &d, GET_MODE (trueop));
757 for (i = 0; i < 4; i++)
758 tmp[i] = ~tmp[i];
759 real_from_target (&d, tmp, mode);
761 break;
762 default:
763 gcc_unreachable ();
765 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
768 else if (GET_CODE (trueop) == CONST_DOUBLE
769 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
770 && GET_MODE_CLASS (mode) == MODE_INT
771 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
773 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
774 operators are intentionally left unspecified (to ease implementation
775 by target backends), for consistency, this routine implements the
776 same semantics for constant folding as used by the middle-end. */
778 HOST_WIDE_INT xh, xl, th, tl;
779 REAL_VALUE_TYPE x, t;
780 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
781 switch (code)
783 case FIX:
784 if (REAL_VALUE_ISNAN (x))
785 return const0_rtx;
787 /* Test against the signed upper bound. */
788 if (width > HOST_BITS_PER_WIDE_INT)
790 th = ((unsigned HOST_WIDE_INT) 1
791 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
792 tl = -1;
794 else
796 th = 0;
797 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
799 real_from_integer (&t, VOIDmode, tl, th, 0);
800 if (REAL_VALUES_LESS (t, x))
802 xh = th;
803 xl = tl;
804 break;
807 /* Test against the signed lower bound. */
808 if (width > HOST_BITS_PER_WIDE_INT)
810 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
811 tl = 0;
813 else
815 th = -1;
816 tl = (HOST_WIDE_INT) -1 << (width - 1);
818 real_from_integer (&t, VOIDmode, tl, th, 0);
819 if (REAL_VALUES_LESS (x, t))
821 xh = th;
822 xl = tl;
823 break;
825 REAL_VALUE_TO_INT (&xl, &xh, x);
826 break;
828 case UNSIGNED_FIX:
829 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
830 return const0_rtx;
832 /* Test against the unsigned upper bound. */
833 if (width == 2*HOST_BITS_PER_WIDE_INT)
835 th = -1;
836 tl = -1;
838 else if (width >= HOST_BITS_PER_WIDE_INT)
840 th = ((unsigned HOST_WIDE_INT) 1
841 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
842 tl = -1;
844 else
846 th = 0;
847 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
849 real_from_integer (&t, VOIDmode, tl, th, 1);
850 if (REAL_VALUES_LESS (t, x))
852 xh = th;
853 xl = tl;
854 break;
857 REAL_VALUE_TO_INT (&xl, &xh, x);
858 break;
860 default:
861 gcc_unreachable ();
863 return immed_double_const (xl, xh, mode);
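/* Example (not part of the original source, assuming 32-bit SImode): under
   these saturating semantics (fix:SI (const_double 1e10)) folds to
   (const_int 2147483647), and a NaN operand folds to (const_int 0).  */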
866 /* This was formerly used only for non-IEEE float.
867 eggert@twinsun.com says it is safe for IEEE also. */
868 else
870 enum rtx_code reversed;
871 rtx temp;
873 /* There are some simplifications we can do even if the operands
874 aren't constant. */
875 switch (code)
877 case NOT:
878 /* (not (not X)) == X. */
879 if (GET_CODE (op) == NOT)
880 return XEXP (op, 0);
882 /* (not (eq X Y)) == (ne X Y), etc. */
883 if (COMPARISON_P (op)
884 && (mode == BImode || STORE_FLAG_VALUE == -1)
885 && ((reversed = reversed_comparison_code (op, NULL_RTX))
886 != UNKNOWN))
887 return simplify_gen_relational (reversed, mode, VOIDmode,
888 XEXP (op, 0), XEXP (op, 1));
890 /* (not (plus X -1)) can become (neg X). */
891 if (GET_CODE (op) == PLUS
892 && XEXP (op, 1) == constm1_rtx)
893 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
895 /* Similarly, (not (neg X)) is (plus X -1). */
896 if (GET_CODE (op) == NEG)
897 return plus_constant (XEXP (op, 0), -1);
899 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
900 if (GET_CODE (op) == XOR
901 && GET_CODE (XEXP (op, 1)) == CONST_INT
902 && (temp = simplify_unary_operation (NOT, mode,
903 XEXP (op, 1),
904 mode)) != 0)
905 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
907 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
908 if (GET_CODE (op) == PLUS
909 && GET_CODE (XEXP (op, 1)) == CONST_INT
910 && mode_signbit_p (mode, XEXP (op, 1))
911 && (temp = simplify_unary_operation (NOT, mode,
912 XEXP (op, 1),
913 mode)) != 0)
914 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
918 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
919 operands other than 1, but that is not valid. We could do a
920 similar simplification for (not (lshiftrt C X)) where C is
921 just the sign bit, but this doesn't seem common enough to
922 bother with. */
923 if (GET_CODE (op) == ASHIFT
924 && XEXP (op, 0) == const1_rtx)
926 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
927 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
930 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
931 by reversing the comparison code if valid. */
932 if (STORE_FLAG_VALUE == -1
933 && COMPARISON_P (op)
934 && (reversed = reversed_comparison_code (op, NULL_RTX))
935 != UNKNOWN)
936 return simplify_gen_relational (reversed, mode, VOIDmode,
937 XEXP (op, 0), XEXP (op, 1));
939 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
940 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
941 so we can perform the above simplification. */
943 if (STORE_FLAG_VALUE == -1
944 && GET_CODE (op) == ASHIFTRT
945 && GET_CODE (XEXP (op, 1)) == CONST_INT
946 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
947 return simplify_gen_relational (GE, mode, VOIDmode,
948 XEXP (op, 0), const0_rtx);
950 break;
952 case NEG:
953 /* (neg (neg X)) == X. */
954 if (GET_CODE (op) == NEG)
955 return XEXP (op, 0);
957 /* (neg (plus X 1)) can become (not X). */
958 if (GET_CODE (op) == PLUS
959 && XEXP (op, 1) == const1_rtx)
960 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
962 /* Similarly, (neg (not X)) is (plus X 1). */
963 if (GET_CODE (op) == NOT)
964 return plus_constant (XEXP (op, 0), 1);
966 /* (neg (minus X Y)) can become (minus Y X). This transformation
967 isn't safe for modes with signed zeros, since if X and Y are
968 both +0, (minus Y X) is the same as (minus X Y). If the
969 rounding mode is towards +infinity (or -infinity) then the two
970 expressions will be rounded differently. */
971 if (GET_CODE (op) == MINUS
972 && !HONOR_SIGNED_ZEROS (mode)
973 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
974 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
975 XEXP (op, 0));
977 if (GET_CODE (op) == PLUS
978 && !HONOR_SIGNED_ZEROS (mode)
979 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
981 /* (neg (plus A C)) is simplified to (minus -C A). */
982 if (GET_CODE (XEXP (op, 1)) == CONST_INT
983 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
985 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
986 mode);
987 if (temp)
988 return simplify_gen_binary (MINUS, mode, temp,
989 XEXP (op, 0));
992 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
993 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
994 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
997 /* (neg (mult A B)) becomes (mult (neg A) B).
998 This works even for floating-point values. */
999 if (GET_CODE (op) == MULT
1000 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1002 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1003 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1006 /* NEG commutes with ASHIFT since it is multiplication. Only do
1007 this if we can then eliminate the NEG (e.g., if the operand
1008 is a constant). */
1009 if (GET_CODE (op) == ASHIFT)
1011 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1012 mode);
1013 if (temp)
1014 return simplify_gen_binary (ASHIFT, mode, temp,
1015 XEXP (op, 1));
1018 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1019 C is equal to the width of MODE minus 1. */
1020 if (GET_CODE (op) == ASHIFTRT
1021 && GET_CODE (XEXP (op, 1)) == CONST_INT
1022 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1023 return simplify_gen_binary (LSHIFTRT, mode,
1024 XEXP (op, 0), XEXP (op, 1));
1026 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1027 C is equal to the width of MODE minus 1. */
1028 if (GET_CODE (op) == LSHIFTRT
1029 && GET_CODE (XEXP (op, 1)) == CONST_INT
1030 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1031 return simplify_gen_binary (ASHIFTRT, mode,
1032 XEXP (op, 0), XEXP (op, 1));
1034 break;
1036 case SIGN_EXTEND:
1037 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1038 becomes just the MINUS if its mode is MODE. This allows
1039 folding switch statements on machines using casesi (such as
1040 the VAX). */
1041 if (GET_CODE (op) == TRUNCATE
1042 && GET_MODE (XEXP (op, 0)) == mode
1043 && GET_CODE (XEXP (op, 0)) == MINUS
1044 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1045 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1046 return XEXP (op, 0);
1048 /* Check for a sign extension of a subreg of a promoted
1049 variable, where the promotion is sign-extended, and the
1050 target mode is the same as the variable's promotion. */
1051 if (GET_CODE (op) == SUBREG
1052 && SUBREG_PROMOTED_VAR_P (op)
1053 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1054 && GET_MODE (XEXP (op, 0)) == mode)
1055 return XEXP (op, 0);
1057 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1058 if (! POINTERS_EXTEND_UNSIGNED
1059 && mode == Pmode && GET_MODE (op) == ptr_mode
1060 && (CONSTANT_P (op)
1061 || (GET_CODE (op) == SUBREG
1062 && REG_P (SUBREG_REG (op))
1063 && REG_POINTER (SUBREG_REG (op))
1064 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1065 return convert_memory_address (Pmode, op);
1066 #endif
1067 break;
1069 case ZERO_EXTEND:
1070 /* Check for a zero extension of a subreg of a promoted
1071 variable, where the promotion is zero-extended, and the
1072 target mode is the same as the variable's promotion. */
1073 if (GET_CODE (op) == SUBREG
1074 && SUBREG_PROMOTED_VAR_P (op)
1075 && SUBREG_PROMOTED_UNSIGNED_P (op)
1076 && GET_MODE (XEXP (op, 0)) == mode)
1077 return XEXP (op, 0);
1079 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1080 if (POINTERS_EXTEND_UNSIGNED > 0
1081 && mode == Pmode && GET_MODE (op) == ptr_mode
1082 && (CONSTANT_P (op)
1083 || (GET_CODE (op) == SUBREG
1084 && REG_P (SUBREG_REG (op))
1085 && REG_POINTER (SUBREG_REG (op))
1086 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1087 return convert_memory_address (Pmode, op);
1088 #endif
1089 break;
1091 default:
1092 break;
1095 return 0;
1099 /* Subroutine of simplify_binary_operation to simplify a commutative,
1100 associative binary operation CODE with result mode MODE, operating
1101 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1102 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1103 canonicalization is possible. */
1105 static rtx
1106 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1107 rtx op0, rtx op1)
1109 rtx tem;
1111 /* Linearize the operator to the left. */
1112 if (GET_CODE (op1) == code)
1114 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1115 if (GET_CODE (op0) == code)
1117 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1118 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1121 /* "a op (b op c)" becomes "(b op c) op a". */
1122 if (! swap_commutative_operands_p (op1, op0))
1123 return simplify_gen_binary (code, mode, op1, op0);
1125 tem = op0;
1126 op0 = op1;
1127 op1 = tem;
1130 if (GET_CODE (op0) == code)
1132 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1133 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1135 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1136 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1139 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1140 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1141 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1142 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1143 if (tem != 0)
1144 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1146 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1147 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1148 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1149 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1150 if (tem != 0)
1151 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1154 return 0;
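/* Canonicalization example (not part of the original source):
   (plus (plus x (const_int 4)) y) is rewritten by the code above as
   (plus (plus x y) (const_int 4)), moving the constant outermost so later
   folds can combine it with other constants.  */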
1157 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1158 and OP1. Return 0 if no simplification is possible.
1160 Don't use this for relational operations such as EQ or LT.
1161 Use simplify_relational_operation instead. */
1163 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1164 rtx op0, rtx op1)
1166 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1167 HOST_WIDE_INT val;
1168 unsigned int width = GET_MODE_BITSIZE (mode);
1169 rtx trueop0, trueop1;
1170 rtx tem;
1172 /* Relational operations don't work here. We must know the mode
1173 of the operands in order to do the comparison correctly.
1174 Assuming a full word can give incorrect results.
1175 Consider comparing 128 with -128 in QImode. */
1176 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1177 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1179 /* Make sure the constant is second. */
1180 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1181 && swap_commutative_operands_p (op0, op1))
1183 tem = op0, op0 = op1, op1 = tem;
1186 trueop0 = avoid_constant_pool_reference (op0);
1187 trueop1 = avoid_constant_pool_reference (op1);
1189 if (VECTOR_MODE_P (mode)
1190 && code != VEC_CONCAT
1191 && GET_CODE (trueop0) == CONST_VECTOR
1192 && GET_CODE (trueop1) == CONST_VECTOR)
1194 unsigned n_elts = GET_MODE_NUNITS (mode);
1195 enum machine_mode op0mode = GET_MODE (trueop0);
1196 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
1197 enum machine_mode op1mode = GET_MODE (trueop1);
1198 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
1199 rtvec v = rtvec_alloc (n_elts);
1200 unsigned int i;
1202 gcc_assert (op0_n_elts == n_elts);
1203 gcc_assert (op1_n_elts == n_elts);
1204 for (i = 0; i < n_elts; i++)
1206 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1207 CONST_VECTOR_ELT (trueop0, i),
1208 CONST_VECTOR_ELT (trueop1, i));
1209 if (!x)
1210 return 0;
1211 RTVEC_ELT (v, i) = x;
1214 return gen_rtx_CONST_VECTOR (mode, v);
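/* Example (not part of the original source): binary operations on two
   constant vectors fold element-wise, e.g. (plus:V4SI (const_vector
   [1 2 3 4]) (const_vector [10 20 30 40])) becomes
   (const_vector [11 22 33 44]).  */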
1217 if (VECTOR_MODE_P (mode)
1218 && code == VEC_CONCAT
1219 && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
1221 unsigned n_elts = GET_MODE_NUNITS (mode);
1222 rtvec v = rtvec_alloc (n_elts);
1224 gcc_assert (n_elts >= 2);
1225 if (n_elts == 2)
1227 gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
1228 gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);
1230 RTVEC_ELT (v, 0) = trueop0;
1231 RTVEC_ELT (v, 1) = trueop1;
1233 else
1235 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
1236 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
1237 unsigned i;
1239 gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
1240 gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
1241 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
1243 for (i = 0; i < op0_n_elts; ++i)
1244 RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
1245 for (i = 0; i < op1_n_elts; ++i)
1246 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
1249 return gen_rtx_CONST_VECTOR (mode, v);
1252 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1253 && GET_CODE (trueop0) == CONST_DOUBLE
1254 && GET_CODE (trueop1) == CONST_DOUBLE
1255 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1257 if (code == AND
1258 || code == IOR
1259 || code == XOR)
1261 long tmp0[4];
1262 long tmp1[4];
1263 REAL_VALUE_TYPE r;
1264 int i;
1266 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1267 GET_MODE (op0));
1268 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1269 GET_MODE (op1));
1270 for (i = 0; i < 4; i++)
1272 switch (code)
1274 case AND:
1275 tmp0[i] &= tmp1[i];
1276 break;
1277 case IOR:
1278 tmp0[i] |= tmp1[i];
1279 break;
1280 case XOR:
1281 tmp0[i] ^= tmp1[i];
1282 break;
1283 default:
1284 gcc_unreachable ();
1287 real_from_target (&r, tmp0, mode);
1288 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1290 else
1292 REAL_VALUE_TYPE f0, f1, value, result;
1293 bool inexact;
1295 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1296 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1297 real_convert (&f0, mode, &f0);
1298 real_convert (&f1, mode, &f1);
1300 if (HONOR_SNANS (mode)
1301 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1302 return 0;
1304 if (code == DIV
1305 && REAL_VALUES_EQUAL (f1, dconst0)
1306 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1307 return 0;
1309 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1310 && flag_trapping_math
1311 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1313 int s0 = REAL_VALUE_NEGATIVE (f0);
1314 int s1 = REAL_VALUE_NEGATIVE (f1);
1316 switch (code)
1318 case PLUS:
1319 /* Inf + -Inf = NaN plus exception. */
1320 if (s0 != s1)
1321 return 0;
1322 break;
1323 case MINUS:
1324 /* Inf - Inf = NaN plus exception. */
1325 if (s0 == s1)
1326 return 0;
1327 break;
1328 case DIV:
1329 /* Inf / Inf = NaN plus exception. */
1330 return 0;
1331 default:
1332 break;
1336 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1337 && flag_trapping_math
1338 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1339 || (REAL_VALUE_ISINF (f1)
1340 && REAL_VALUES_EQUAL (f0, dconst0))))
1341 /* Inf * 0 = NaN plus exception. */
1342 return 0;
1344 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
1345 &f0, &f1);
1346 real_convert (&result, mode, &value);
1348 /* Don't constant fold this floating point operation if the
 1349 result may depend upon the run-time rounding mode and
1350 flag_rounding_math is set, or if GCC's software emulation
1351 is unable to accurately represent the result. */
1353 if ((flag_rounding_math
1354 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
1355 && !flag_unsafe_math_optimizations))
1356 && (inexact || !real_identical (&result, &value)))
1357 return NULL_RTX;
1359 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
1363 /* We can fold some multi-word operations. */
1364 if (GET_MODE_CLASS (mode) == MODE_INT
1365 && width == HOST_BITS_PER_WIDE_INT * 2
1366 && (GET_CODE (trueop0) == CONST_DOUBLE
1367 || GET_CODE (trueop0) == CONST_INT)
1368 && (GET_CODE (trueop1) == CONST_DOUBLE
1369 || GET_CODE (trueop1) == CONST_INT))
1371 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1372 HOST_WIDE_INT h1, h2, hv, ht;
1374 if (GET_CODE (trueop0) == CONST_DOUBLE)
1375 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1376 else
1377 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1379 if (GET_CODE (trueop1) == CONST_DOUBLE)
1380 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1381 else
1382 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1384 switch (code)
1386 case MINUS:
1387 /* A - B == A + (-B). */
1388 neg_double (l2, h2, &lv, &hv);
1389 l2 = lv, h2 = hv;
1391 /* Fall through.... */
1393 case PLUS:
1394 add_double (l1, h1, l2, h2, &lv, &hv);
1395 break;
1397 case MULT:
1398 mul_double (l1, h1, l2, h2, &lv, &hv);
1399 break;
1401 case DIV:
1402 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1403 &lv, &hv, &lt, &ht))
1404 return 0;
1405 break;
1407 case MOD:
1408 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1409 &lt, &ht, &lv, &hv))
1410 return 0;
1411 break;
1413 case UDIV:
1414 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1415 &lv, &hv, &lt, &ht))
1416 return 0;
1417 break;
1419 case UMOD:
1420 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1421 &lt, &ht, &lv, &hv))
1422 return 0;
1423 break;
1425 case AND:
1426 lv = l1 & l2, hv = h1 & h2;
1427 break;
1429 case IOR:
1430 lv = l1 | l2, hv = h1 | h2;
1431 break;
1433 case XOR:
1434 lv = l1 ^ l2, hv = h1 ^ h2;
1435 break;
1437 case SMIN:
1438 if (h1 < h2
1439 || (h1 == h2
1440 && ((unsigned HOST_WIDE_INT) l1
1441 < (unsigned HOST_WIDE_INT) l2)))
1442 lv = l1, hv = h1;
1443 else
1444 lv = l2, hv = h2;
1445 break;
1447 case SMAX:
1448 if (h1 > h2
1449 || (h1 == h2
1450 && ((unsigned HOST_WIDE_INT) l1
1451 > (unsigned HOST_WIDE_INT) l2)))
1452 lv = l1, hv = h1;
1453 else
1454 lv = l2, hv = h2;
1455 break;
1457 case UMIN:
1458 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1459 || (h1 == h2
1460 && ((unsigned HOST_WIDE_INT) l1
1461 < (unsigned HOST_WIDE_INT) l2)))
1462 lv = l1, hv = h1;
1463 else
1464 lv = l2, hv = h2;
1465 break;
1467 case UMAX:
1468 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1469 || (h1 == h2
1470 && ((unsigned HOST_WIDE_INT) l1
1471 > (unsigned HOST_WIDE_INT) l2)))
1472 lv = l1, hv = h1;
1473 else
1474 lv = l2, hv = h2;
1475 break;
1477 case LSHIFTRT: case ASHIFTRT:
1478 case ASHIFT:
1479 case ROTATE: case ROTATERT:
1480 if (SHIFT_COUNT_TRUNCATED)
1481 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1483 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1484 return 0;
1486 if (code == LSHIFTRT || code == ASHIFTRT)
1487 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1488 code == ASHIFTRT);
1489 else if (code == ASHIFT)
1490 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1491 else if (code == ROTATE)
1492 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1493 else /* code == ROTATERT */
1494 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1495 break;
1497 default:
1498 return 0;
1501 return immed_double_const (lv, hv, mode);
1504 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1505 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1507 /* Even if we can't compute a constant result,
1508 there are some cases worth simplifying. */
1510 switch (code)
1512 case PLUS:
1513 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1514 when x is NaN, infinite, or finite and nonzero. They aren't
1515 when x is -0 and the rounding mode is not towards -infinity,
1516 since (-0) + 0 is then 0. */
1517 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1518 return op0;
1520 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1521 transformations are safe even for IEEE. */
1522 if (GET_CODE (op0) == NEG)
1523 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1524 else if (GET_CODE (op1) == NEG)
1525 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1527 /* (~a) + 1 -> -a */
1528 if (INTEGRAL_MODE_P (mode)
1529 && GET_CODE (op0) == NOT
1530 && trueop1 == const1_rtx)
1531 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1533 /* Handle both-operands-constant cases. We can only add
1534 CONST_INTs to constants since the sum of relocatable symbols
1535 can't be handled by most assemblers. Don't add CONST_INT
1536 to CONST_INT since overflow won't be computed properly if wider
1537 than HOST_BITS_PER_WIDE_INT. */
1539 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1540 && GET_CODE (op1) == CONST_INT)
1541 return plus_constant (op0, INTVAL (op1));
1542 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1543 && GET_CODE (op0) == CONST_INT)
1544 return plus_constant (op1, INTVAL (op0));
1546 /* See if this is something like X * C - X or vice versa or
1547 if the multiplication is written as a shift. If so, we can
1548 distribute and make a new multiply, shift, or maybe just
1549 have X (if C is 2 in the example above). But don't make
1550 something more expensive than we had before. */
1552 if (! FLOAT_MODE_P (mode))
1554 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1555 rtx lhs = op0, rhs = op1;
1557 if (GET_CODE (lhs) == NEG)
1558 coeff0 = -1, lhs = XEXP (lhs, 0);
1559 else if (GET_CODE (lhs) == MULT
1560 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1562 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1564 else if (GET_CODE (lhs) == ASHIFT
1565 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1566 && INTVAL (XEXP (lhs, 1)) >= 0
1567 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1569 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1570 lhs = XEXP (lhs, 0);
1573 if (GET_CODE (rhs) == NEG)
1574 coeff1 = -1, rhs = XEXP (rhs, 0);
1575 else if (GET_CODE (rhs) == MULT
1576 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1578 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1580 else if (GET_CODE (rhs) == ASHIFT
1581 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1582 && INTVAL (XEXP (rhs, 1)) >= 0
1583 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1585 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1586 rhs = XEXP (rhs, 0);
1589 if (rtx_equal_p (lhs, rhs))
1591 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1592 tem = simplify_gen_binary (MULT, mode, lhs,
1593 GEN_INT (coeff0 + coeff1));
1594 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1595 ? tem : 0;
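/* Worked example (not part of the original source): for
   (plus (mult x (const_int 3)) x) the code above finds coeff0 = 3 and
   coeff1 = 1 with identical remaining operands, so it forms
   (mult x (const_int 4)) and keeps it only if it is no more expensive
   than the original expression.  */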
1599 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1600 if ((GET_CODE (op1) == CONST_INT
1601 || GET_CODE (op1) == CONST_DOUBLE)
1602 && GET_CODE (op0) == XOR
1603 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1604 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1605 && mode_signbit_p (mode, op1))
1606 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1607 simplify_gen_binary (XOR, mode, op1,
1608 XEXP (op0, 1)));
1610 /* If one of the operands is a PLUS or a MINUS, see if we can
1611 simplify this by the associative law.
1612 Don't use the associative law for floating point.
1613 The inaccuracy makes it nonassociative,
1614 and subtle programs can break if operations are associated. */
1616 if (INTEGRAL_MODE_P (mode)
1617 && (plus_minus_operand_p (op0)
1618 || plus_minus_operand_p (op1))
1619 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1620 return tem;
1622 /* Reassociate floating point addition only when the user
1623 specifies unsafe math optimizations. */
1624 if (FLOAT_MODE_P (mode)
1625 && flag_unsafe_math_optimizations)
1627 tem = simplify_associative_operation (code, mode, op0, op1);
1628 if (tem)
1629 return tem;
1631 break;
1633 case COMPARE:
1634 #ifdef HAVE_cc0
1635 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1636 using cc0, in which case we want to leave it as a COMPARE
1637 so we can distinguish it from a register-register-copy.
1639 In IEEE floating point, x-0 is not the same as x. */
1641 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1642 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1643 && trueop1 == CONST0_RTX (mode))
1644 return op0;
1645 #endif
1647 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1648 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1649 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1650 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1652 rtx xop00 = XEXP (op0, 0);
1653 rtx xop10 = XEXP (op1, 0);
1655 #ifdef HAVE_cc0
1656 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1657 #else
1658 if (REG_P (xop00) && REG_P (xop10)
1659 && GET_MODE (xop00) == GET_MODE (xop10)
1660 && REGNO (xop00) == REGNO (xop10)
1661 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1662 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1663 #endif
1664 return xop00;
1666 break;
1668 case MINUS:
1669 /* We can't assume x-x is 0 even with non-IEEE floating point,
1670 but since it is zero except in very strange circumstances, we
1671 will treat it as zero with -funsafe-math-optimizations. */
1672 if (rtx_equal_p (trueop0, trueop1)
1673 && ! side_effects_p (op0)
1674 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1675 return CONST0_RTX (mode);
1677 /* Change subtraction from zero into negation. (0 - x) is the
1678 same as -x when x is NaN, infinite, or finite and nonzero.
1679 But if the mode has signed zeros, and does not round towards
1680 -infinity, then 0 - 0 is 0, not -0. */
1681 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1682 return simplify_gen_unary (NEG, mode, op1, mode);
1684 /* (-1 - a) is ~a. */
1685 if (trueop0 == constm1_rtx)
1686 return simplify_gen_unary (NOT, mode, op1, mode);
1688 /* Subtracting 0 has no effect unless the mode has signed zeros
1689 and supports rounding towards -infinity. In such a case,
1690 0 - 0 is -0. */
1691 if (!(HONOR_SIGNED_ZEROS (mode)
1692 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1693 && trueop1 == CONST0_RTX (mode))
1694 return op0;
1696 /* See if this is something like X * C - X or vice versa or
1697 if the multiplication is written as a shift. If so, we can
1698 distribute and make a new multiply, shift, or maybe just
1699 have X (if C is 2 in the example above). But don't make
1700 something more expensive than we had before. */
1702 if (! FLOAT_MODE_P (mode))
1704 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1705 rtx lhs = op0, rhs = op1;
1707 if (GET_CODE (lhs) == NEG)
1708 coeff0 = -1, lhs = XEXP (lhs, 0);
1709 else if (GET_CODE (lhs) == MULT
1710 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1712 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1714 else if (GET_CODE (lhs) == ASHIFT
1715 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1716 && INTVAL (XEXP (lhs, 1)) >= 0
1717 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1719 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1720 lhs = XEXP (lhs, 0);
1723 if (GET_CODE (rhs) == NEG)
1724 coeff1 = - 1, rhs = XEXP (rhs, 0);
1725 else if (GET_CODE (rhs) == MULT
1726 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1728 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1730 else if (GET_CODE (rhs) == ASHIFT
1731 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1732 && INTVAL (XEXP (rhs, 1)) >= 0
1733 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1735 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1736 rhs = XEXP (rhs, 0);
1739 if (rtx_equal_p (lhs, rhs))
1741 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1742 tem = simplify_gen_binary (MULT, mode, lhs,
1743 GEN_INT (coeff0 - coeff1));
1744 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1745 ? tem : 0;
1749 /* (a - (-b)) -> (a + b). True even for IEEE. */
1750 if (GET_CODE (op1) == NEG)
1751 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1753 /* (-x - c) may be simplified as (-c - x). */
1754 if (GET_CODE (op0) == NEG
1755 && (GET_CODE (op1) == CONST_INT
1756 || GET_CODE (op1) == CONST_DOUBLE))
1758 tem = simplify_unary_operation (NEG, mode, op1, mode);
1759 if (tem)
1760 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1763 /* If one of the operands is a PLUS or a MINUS, see if we can
1764 simplify this by the associative law.
1765 Don't use the associative law for floating point.
1766 The inaccuracy makes it nonassociative,
1767 and subtle programs can break if operations are associated. */
1769 if (INTEGRAL_MODE_P (mode)
1770 && (plus_minus_operand_p (op0)
1771 || plus_minus_operand_p (op1))
1772 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1773 return tem;
1775 /* Don't let a relocatable value get a negative coeff. */
1776 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1777 return simplify_gen_binary (PLUS, mode,
1778 op0,
1779 neg_const_int (mode, op1));
1781 /* (x - (x & y)) -> (x & ~y) */
1782 if (GET_CODE (op1) == AND)
1784 if (rtx_equal_p (op0, XEXP (op1, 0)))
1786 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1787 GET_MODE (XEXP (op1, 1)));
1788 return simplify_gen_binary (AND, mode, op0, tem);
1790 if (rtx_equal_p (op0, XEXP (op1, 1)))
1792 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1793 GET_MODE (XEXP (op1, 0)));
1794 return simplify_gen_binary (AND, mode, op0, tem);
1797 break;
1799 case MULT:
1800 if (trueop1 == constm1_rtx)
1801 return simplify_gen_unary (NEG, mode, op0, mode);
1803 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1804 x is NaN, since x * 0 is then also NaN. Nor is it valid
1805 when the mode has signed zeros, since multiplying a negative
1806 number by 0 will give -0, not 0. */
1807 if (!HONOR_NANS (mode)
1808 && !HONOR_SIGNED_ZEROS (mode)
1809 && trueop1 == CONST0_RTX (mode)
1810 && ! side_effects_p (op0))
1811 return op1;
1813 /* In IEEE floating point, x*1 is not equivalent to x for
1814 signalling NaNs. */
1815 if (!HONOR_SNANS (mode)
1816 && trueop1 == CONST1_RTX (mode))
1817 return op0;
1819 /* Convert multiply by constant power of two into shift unless
1820 we are still generating RTL. This test is a kludge. */
1821 if (GET_CODE (trueop1) == CONST_INT
1822 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1823 /* If the mode is larger than the host word size, and the
1824 uppermost bit is set, then this isn't a power of two due
1825 to implicit sign extension. */
1826 && (width <= HOST_BITS_PER_WIDE_INT
1827 || val != HOST_BITS_PER_WIDE_INT - 1))
1828 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1830 /* x*2 is x+x and x*(-1) is -x */
1831 if (GET_CODE (trueop1) == CONST_DOUBLE
1832 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1833 && GET_MODE (op0) == mode)
1835 REAL_VALUE_TYPE d;
1836 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1838 if (REAL_VALUES_EQUAL (d, dconst2))
1839 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1841 if (REAL_VALUES_EQUAL (d, dconstm1))
1842 return simplify_gen_unary (NEG, mode, op0, mode);
1845 /* Reassociate multiplication, but for floating point MULTs
1846 only when the user specifies unsafe math optimizations. */
1847 if (! FLOAT_MODE_P (mode)
1848 || flag_unsafe_math_optimizations)
1850 tem = simplify_associative_operation (code, mode, op0, op1);
1851 if (tem)
1852 return tem;
1854 break;
1856 case IOR:
1857 if (trueop1 == const0_rtx)
1858 return op0;
1859 if (GET_CODE (trueop1) == CONST_INT
1860 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1861 == GET_MODE_MASK (mode)))
1862 return op1;
1863 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1864 return op0;
1865 /* A | (~A) -> -1 */
1866 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1867 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1868 && ! side_effects_p (op0)
1869 && GET_MODE_CLASS (mode) != MODE_CC)
1870 return constm1_rtx;
1871 tem = simplify_associative_operation (code, mode, op0, op1);
1872 if (tem)
1873 return tem;
1874 break;
1876 case XOR:
1877 if (trueop1 == const0_rtx)
1878 return op0;
1879 if (GET_CODE (trueop1) == CONST_INT
1880 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1881 == GET_MODE_MASK (mode)))
1882 return simplify_gen_unary (NOT, mode, op0, mode);
1883 if (trueop0 == trueop1
1884 && ! side_effects_p (op0)
1885 && GET_MODE_CLASS (mode) != MODE_CC)
1886 return const0_rtx;
1888 /* Canonicalize XOR of the most significant bit to PLUS. */
1889 if ((GET_CODE (op1) == CONST_INT
1890 || GET_CODE (op1) == CONST_DOUBLE)
1891 && mode_signbit_p (mode, op1))
1892 return simplify_gen_binary (PLUS, mode, op0, op1);
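/* Example (not part of the original source): in SImode, (xor x C) where C
   is the sign-bit constant is rewritten as (plus x C), the canonical form
   this pass prefers for that operation.  */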
1893 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1894 if ((GET_CODE (op1) == CONST_INT
1895 || GET_CODE (op1) == CONST_DOUBLE)
1896 && GET_CODE (op0) == PLUS
1897 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1898 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1899 && mode_signbit_p (mode, XEXP (op0, 1)))
1900 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1901 simplify_gen_binary (XOR, mode, op1,
1902 XEXP (op0, 1)));
1904 tem = simplify_associative_operation (code, mode, op0, op1);
1905 if (tem)
1906 return tem;
1907 break;
1909 case AND:
1910 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1911 return const0_rtx;
1912 /* If we are turning off bits already known off in OP0, we need
1913 not do an AND. */
1914 if (GET_CODE (trueop1) == CONST_INT
1915 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1916 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1917 return op0;
1918 if (trueop0 == trueop1 && ! side_effects_p (op0)
1919 && GET_MODE_CLASS (mode) != MODE_CC)
1920 return op0;
1921 /* A & (~A) -> 0 */
1922 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1923 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1924 && ! side_effects_p (op0)
1925 && GET_MODE_CLASS (mode) != MODE_CC)
1926 return const0_rtx;
1928 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
1929 there are no nonzero bits of C outside of X's mode. */
1930 if ((GET_CODE (op0) == SIGN_EXTEND
1931 || GET_CODE (op0) == ZERO_EXTEND)
1932 && GET_CODE (trueop1) == CONST_INT
1933 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1934 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
1935 & INTVAL (trueop1)) == 0)
1937 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
1938 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
1939 gen_int_mode (INTVAL (trueop1),
1940 imode));
1941 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
1944 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1945 ((A & N) + B) & M -> (A + B) & M
1946 Similarly if (N & M) == 0,
1947 ((A | N) + B) & M -> (A + B) & M
1948 and for - instead of + and/or ^ instead of |. */
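/* Worked example (not part of the original source): with M = 0xff,
   ((a & 0x1ff) + b) & 0xff becomes (a + b) & 0xff because 0x1ff covers all
   of M's bits, and ((a | 0x100) + b) & 0xff becomes (a + b) & 0xff because
   0x100 has no bits in common with M.  */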
1949 if (GET_CODE (trueop1) == CONST_INT
1950 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1951 && ~INTVAL (trueop1)
1952 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1953 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1955 rtx pmop[2];
1956 int which;
1958 pmop[0] = XEXP (op0, 0);
1959 pmop[1] = XEXP (op0, 1);
1961 for (which = 0; which < 2; which++)
1963 tem = pmop[which];
1964 switch (GET_CODE (tem))
1966 case AND:
1967 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1968 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1969 == INTVAL (trueop1))
1970 pmop[which] = XEXP (tem, 0);
1971 break;
1972 case IOR:
1973 case XOR:
1974 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1975 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1976 pmop[which] = XEXP (tem, 0);
1977 break;
1978 default:
1979 break;
1983 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1985 tem = simplify_gen_binary (GET_CODE (op0), mode,
1986 pmop[0], pmop[1]);
1987 return simplify_gen_binary (code, mode, tem, op1);
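      /* For example, with M == 0xf and N == 0x10,
         ((A | 0x10) + B) & 0xf equals (A + B) & 0xf for all A and B,
         because carries in an addition only propagate upwards, so bits
         of an addend above the mask never influence the masked bits.  */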
1990 tem = simplify_associative_operation (code, mode, op0, op1);
1991 if (tem)
1992 return tem;
1993 break;
1995 case UDIV:
1996 /* 0/x is 0 (or x&0 if x has side-effects). */
1997 if (trueop0 == const0_rtx)
1998 return side_effects_p (op1)
1999 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2000 : const0_rtx;
2001 /* x/1 is x. */
2002 if (trueop1 == const1_rtx)
2004 /* Handle narrowing UDIV. */
2005 rtx x = gen_lowpart_common (mode, op0);
2006 if (x)
2007 return x;
2008 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2009 return gen_lowpart_SUBREG (mode, op0);
2010 return op0;
2012 /* Convert divide by power of two into shift. */
2013 if (GET_CODE (trueop1) == CONST_INT
2014 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
2015 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
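      /* For example, (udiv X (const_int 8)) becomes
         (lshiftrt X (const_int 3)); e.g. 100 / 8 == 100 >> 3 == 12
         for the unsigned interpretation of X.  */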
2016 break;
2018 case DIV:
2019 /* Handle floating point and integers separately. */
2020 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2022 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2023 safe for modes with NaNs, since 0.0 / 0.0 will then be
2024 NaN rather than 0.0. Nor is it safe for modes with signed
2025 zeros, since dividing 0 by a negative number gives -0.0 */
2026 if (trueop0 == CONST0_RTX (mode)
2027 && !HONOR_NANS (mode)
2028 && !HONOR_SIGNED_ZEROS (mode)
2029 && ! side_effects_p (op1))
2030 return op0;
2031 /* x/1.0 is x. */
2032 if (trueop1 == CONST1_RTX (mode)
2033 && !HONOR_SNANS (mode))
2034 return op0;
2036 if (GET_CODE (trueop1) == CONST_DOUBLE
2037 && trueop1 != CONST0_RTX (mode))
2039 REAL_VALUE_TYPE d;
2040 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2042 /* x/-1.0 is -x. */
2043 if (REAL_VALUES_EQUAL (d, dconstm1)
2044 && !HONOR_SNANS (mode))
2045 return simplify_gen_unary (NEG, mode, op0, mode);
2047 /* Change FP division by a constant into multiplication.
2048 Only do this with -funsafe-math-optimizations. */
2049 if (flag_unsafe_math_optimizations
2050 && !REAL_VALUES_EQUAL (d, dconst0))
2052 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2053 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2054 return simplify_gen_binary (MULT, mode, op0, tem);
2058 else
2060 /* 0/x is 0 (or x&0 if x has side-effects). */
2061 if (trueop0 == const0_rtx)
2062 return side_effects_p (op1)
2063 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2064 : const0_rtx;
2065 /* x/1 is x. */
2066 if (trueop1 == const1_rtx)
2068 /* Handle narrowing DIV. */
2069 rtx x = gen_lowpart_common (mode, op0);
2070 if (x)
2071 return x;
2072 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2073 return gen_lowpart_SUBREG (mode, op0);
2074 return op0;
2076 /* x/-1 is -x. */
2077 if (trueop1 == constm1_rtx)
2079 rtx x = gen_lowpart_common (mode, op0);
2080 if (!x)
2081 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2082 ? gen_lowpart_SUBREG (mode, op0) : op0;
2083 return simplify_gen_unary (NEG, mode, x, mode);
2086 break;
2088 case UMOD:
2089 /* 0%x is 0 (or x&0 if x has side-effects). */
2090 if (trueop0 == const0_rtx)
2091 return side_effects_p (op1)
2092 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2093 : const0_rtx;
2094 /* x%1 is 0 (or x&0 if x has side-effects). */
2095 if (trueop1 == const1_rtx)
2096 return side_effects_p (op0)
2097 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2098 : const0_rtx;
2099 /* Implement modulus by power of two as AND. */
2100 if (GET_CODE (trueop1) == CONST_INT
2101 && exact_log2 (INTVAL (trueop1)) > 0)
2102 return simplify_gen_binary (AND, mode, op0,
2103 GEN_INT (INTVAL (op1) - 1));
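      /* For example, (umod X (const_int 16)) becomes
         (and X (const_int 15)); e.g. 100 % 16 == 100 & 15 == 4
         for the unsigned interpretation of X.  */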
2104 break;
2106 case MOD:
2107 /* 0%x is 0 (or x&0 if x has side-effects). */
2108 if (trueop0 == const0_rtx)
2109 return side_effects_p (op1)
2110 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2111 : const0_rtx;
2112 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2113 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2114 return side_effects_p (op0)
2115 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2116 : const0_rtx;
2117 break;
2119 case ROTATERT:
2120 case ROTATE:
2121 case ASHIFTRT:
2122 /* Rotating ~0 always results in ~0. */
2123 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2124 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2125 && ! side_effects_p (op1))
2126 return op0;
2128 /* Fall through.... */
2130 case ASHIFT:
2131 case LSHIFTRT:
2132 if (trueop1 == const0_rtx)
2133 return op0;
2134 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2135 return op0;
2136 break;
2138 case SMIN:
2139 if (width <= HOST_BITS_PER_WIDE_INT
2140 && GET_CODE (trueop1) == CONST_INT
2141 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2142 && ! side_effects_p (op0))
2143 return op1;
2144 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2145 return op0;
2146 tem = simplify_associative_operation (code, mode, op0, op1);
2147 if (tem)
2148 return tem;
2149 break;
2151 case SMAX:
2152 if (width <= HOST_BITS_PER_WIDE_INT
2153 && GET_CODE (trueop1) == CONST_INT
2154 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2155 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2156 && ! side_effects_p (op0))
2157 return op1;
2158 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2159 return op0;
2160 tem = simplify_associative_operation (code, mode, op0, op1);
2161 if (tem)
2162 return tem;
2163 break;
2165 case UMIN:
2166 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2167 return op1;
2168 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2169 return op0;
2170 tem = simplify_associative_operation (code, mode, op0, op1);
2171 if (tem)
2172 return tem;
2173 break;
2175 case UMAX:
2176 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2177 return op1;
2178 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2179 return op0;
2180 tem = simplify_associative_operation (code, mode, op0, op1);
2181 if (tem)
2182 return tem;
2183 break;
2185 case SS_PLUS:
2186 case US_PLUS:
2187 case SS_MINUS:
2188 case US_MINUS:
2189 /* ??? There are simplifications that can be done. */
2190 return 0;
2192 case VEC_SELECT:
2193 if (!VECTOR_MODE_P (mode))
2195 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2196 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2197 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2198 gcc_assert (XVECLEN (trueop1, 0) == 1);
2199 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2201 if (GET_CODE (trueop0) == CONST_VECTOR)
2202 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2203 (trueop1, 0, 0)));
2205 else
2207 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2208 gcc_assert (GET_MODE_INNER (mode)
2209 == GET_MODE_INNER (GET_MODE (trueop0)));
2210 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2212 if (GET_CODE (trueop0) == CONST_VECTOR)
2214 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2215 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2216 rtvec v = rtvec_alloc (n_elts);
2217 unsigned int i;
2219 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2220 for (i = 0; i < n_elts; i++)
2222 rtx x = XVECEXP (trueop1, 0, i);
2224 gcc_assert (GET_CODE (x) == CONST_INT);
2225 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2226 INTVAL (x));
2229 return gen_rtx_CONST_VECTOR (mode, v);
2232 return 0;
2233 case VEC_CONCAT:
2235 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2236 ? GET_MODE (trueop0)
2237 : GET_MODE_INNER (mode));
2238 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2239 ? GET_MODE (trueop1)
2240 : GET_MODE_INNER (mode));
2242 gcc_assert (VECTOR_MODE_P (mode));
2243 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2244 == GET_MODE_SIZE (mode));
2246 if (VECTOR_MODE_P (op0_mode))
2247 gcc_assert (GET_MODE_INNER (mode)
2248 == GET_MODE_INNER (op0_mode));
2249 else
2250 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2252 if (VECTOR_MODE_P (op1_mode))
2253 gcc_assert (GET_MODE_INNER (mode)
2254 == GET_MODE_INNER (op1_mode));
2255 else
2256 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2258 if ((GET_CODE (trueop0) == CONST_VECTOR
2259 || GET_CODE (trueop0) == CONST_INT
2260 || GET_CODE (trueop0) == CONST_DOUBLE)
2261 && (GET_CODE (trueop1) == CONST_VECTOR
2262 || GET_CODE (trueop1) == CONST_INT
2263 || GET_CODE (trueop1) == CONST_DOUBLE))
2265 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2266 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2267 rtvec v = rtvec_alloc (n_elts);
2268 unsigned int i;
2269 unsigned in_n_elts = 1;
2271 if (VECTOR_MODE_P (op0_mode))
2272 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2273 for (i = 0; i < n_elts; i++)
2275 if (i < in_n_elts)
2277 if (!VECTOR_MODE_P (op0_mode))
2278 RTVEC_ELT (v, i) = trueop0;
2279 else
2280 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2282 else
2284 if (!VECTOR_MODE_P (op1_mode))
2285 RTVEC_ELT (v, i) = trueop1;
2286 else
2287 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2288 i - in_n_elts);
2292 return gen_rtx_CONST_VECTOR (mode, v);
2295 return 0;
2297 default:
2298 gcc_unreachable ();
2301 return 0;
2304 /* Get the integer argument values in two forms:
2305 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2307 arg0 = INTVAL (trueop0);
2308 arg1 = INTVAL (trueop1);
2310 if (width < HOST_BITS_PER_WIDE_INT)
2312 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2313 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2315 arg0s = arg0;
2316 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2317 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2319 arg1s = arg1;
2320 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2321 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2323 else
2325 arg0s = arg0;
2326 arg1s = arg1;
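      /* For example, a QImode (const_int -1) gives ARG0 == 0xff in the
         zero-extended form and ARG0S == -1 in the sign-extended form;
         unsigned operations such as UDIV below use the former, signed
         ones such as DIV the latter.  */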
2329 /* Compute the value of the arithmetic. */
2331 switch (code)
2333 case PLUS:
2334 val = arg0s + arg1s;
2335 break;
2337 case MINUS:
2338 val = arg0s - arg1s;
2339 break;
2341 case MULT:
2342 val = arg0s * arg1s;
2343 break;
2345 case DIV:
2346 if (arg1s == 0
2347 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2348 && arg1s == -1))
2349 return 0;
2350 val = arg0s / arg1s;
2351 break;
2353 case MOD:
2354 if (arg1s == 0
2355 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2356 && arg1s == -1))
2357 return 0;
2358 val = arg0s % arg1s;
2359 break;
2361 case UDIV:
2362 if (arg1 == 0
2363 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2364 && arg1s == -1))
2365 return 0;
2366 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2367 break;
2369 case UMOD:
2370 if (arg1 == 0
2371 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2372 && arg1s == -1))
2373 return 0;
2374 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2375 break;
2377 case AND:
2378 val = arg0 & arg1;
2379 break;
2381 case IOR:
2382 val = arg0 | arg1;
2383 break;
2385 case XOR:
2386 val = arg0 ^ arg1;
2387 break;
2389 case LSHIFTRT:
2390 case ASHIFT:
2391 case ASHIFTRT:
2392 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2393 value is in range. We can't return any old value for out-of-range
2394 arguments because either the middle-end (via shift_truncation_mask)
2395 or the back-end might be relying on target-specific knowledge.
2396 Nor can we rely on shift_truncation_mask, since the shift might
2397 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2398 if (SHIFT_COUNT_TRUNCATED)
2399 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2400 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2401 return 0;
2403 val = (code == ASHIFT
2404 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2405 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2407 /* Sign-extend the result for arithmetic right shifts. */
2408 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2409 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2410 break;
2412 case ROTATERT:
2413 if (arg1 < 0)
2414 return 0;
2416 arg1 %= width;
2417 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2418 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2419 break;
2421 case ROTATE:
2422 if (arg1 < 0)
2423 return 0;
2425 arg1 %= width;
2426 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2427 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
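      /* For example, rotating the QImode value 0xb1 left by 4 computes
         (0xb1 << 4) | (0xb1 >> 4) == 0xb1b, which trunc_int_for_mode
         below reduces to the expected 0x1b.  */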
2428 break;
2430 case COMPARE:
2431 /* Do nothing here. */
2432 return 0;
2434 case SMIN:
2435 val = arg0s <= arg1s ? arg0s : arg1s;
2436 break;
2438 case UMIN:
2439 val = ((unsigned HOST_WIDE_INT) arg0
2440 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2441 break;
2443 case SMAX:
2444 val = arg0s > arg1s ? arg0s : arg1s;
2445 break;
2447 case UMAX:
2448 val = ((unsigned HOST_WIDE_INT) arg0
2449 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2450 break;
2452 case SS_PLUS:
2453 case US_PLUS:
2454 case SS_MINUS:
2455 case US_MINUS:
2456 /* ??? There are simplifications that can be done. */
2457 return 0;
2459 default:
2460 gcc_unreachable ();
2463 val = trunc_int_for_mode (val, mode);
2465 return GEN_INT (val);
2468 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2469 PLUS or MINUS.
2471 Rather than test for specific cases, we do this by a brute-force method
2472 and do all possible simplifications until no more changes occur. Then
2473 we rebuild the operation.
2475 If FORCE is true, then always generate the rtx. This is used to
2476 canonicalize stuff emitted from simplify_gen_binary. Note that this
2477 can still fail if the rtx is too complex. It won't fail just because
2478 the result is not 'simpler' than the input, however. */
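/* For example, (minus (plus a b) (minus a c)) is expanded into the
   operand list { +a, +b, -a, +c }; the pairwise pass cancels +a
   against -a and the remaining operands are rebuilt as (plus b c).  */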
2480 struct simplify_plus_minus_op_data
2482 rtx op;
2483 int neg;
2486 static int
2487 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2489 const struct simplify_plus_minus_op_data *d1 = p1;
2490 const struct simplify_plus_minus_op_data *d2 = p2;
2492 return (commutative_operand_precedence (d2->op)
2493 - commutative_operand_precedence (d1->op));
2496 static rtx
2497 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2498 rtx op1, int force)
2500 struct simplify_plus_minus_op_data ops[8];
2501 rtx result, tem;
2502 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2503 int first, changed;
2504 int i, j;
2506 memset (ops, 0, sizeof ops);
2508 /* Set up the two operands and then expand them until nothing has been
2509 changed. If we run out of room in our array, give up; this should
2510 almost never happen. */
2512 ops[0].op = op0;
2513 ops[0].neg = 0;
2514 ops[1].op = op1;
2515 ops[1].neg = (code == MINUS);
2519 changed = 0;
2521 for (i = 0; i < n_ops; i++)
2523 rtx this_op = ops[i].op;
2524 int this_neg = ops[i].neg;
2525 enum rtx_code this_code = GET_CODE (this_op);
2527 switch (this_code)
2529 case PLUS:
2530 case MINUS:
2531 if (n_ops == 7)
2532 return NULL_RTX;
2534 ops[n_ops].op = XEXP (this_op, 1);
2535 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2536 n_ops++;
2538 ops[i].op = XEXP (this_op, 0);
2539 input_ops++;
2540 changed = 1;
2541 break;
2543 case NEG:
2544 ops[i].op = XEXP (this_op, 0);
2545 ops[i].neg = ! this_neg;
2546 changed = 1;
2547 break;
2549 case CONST:
2550 if (n_ops < 7
2551 && GET_CODE (XEXP (this_op, 0)) == PLUS
2552 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2553 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2555 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2556 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2557 ops[n_ops].neg = this_neg;
2558 n_ops++;
2559 input_consts++;
2560 changed = 1;
2562 break;
2564 case NOT:
2565 /* ~a -> (-a - 1) */
2566 if (n_ops != 7)
2568 ops[n_ops].op = constm1_rtx;
2569 ops[n_ops++].neg = this_neg;
2570 ops[i].op = XEXP (this_op, 0);
2571 ops[i].neg = !this_neg;
2572 changed = 1;
2574 break;
2576 case CONST_INT:
2577 if (this_neg)
2579 ops[i].op = neg_const_int (mode, this_op);
2580 ops[i].neg = 0;
2581 changed = 1;
2583 break;
2585 default:
2586 break;
2590 while (changed);
2592 /* If we only have two operands, we can't do anything. */
2593 if (n_ops <= 2 && !force)
2594 return NULL_RTX;
2596 /* Count the number of CONSTs we didn't split above. */
2597 for (i = 0; i < n_ops; i++)
2598 if (GET_CODE (ops[i].op) == CONST)
2599 input_consts++;
2601 /* Now simplify each pair of operands until nothing changes. The first
2602 time through just simplify constants against each other. */
2604 first = 1;
2607 changed = first;
2609 for (i = 0; i < n_ops - 1; i++)
2610 for (j = i + 1; j < n_ops; j++)
2612 rtx lhs = ops[i].op, rhs = ops[j].op;
2613 int lneg = ops[i].neg, rneg = ops[j].neg;
2615 if (lhs != 0 && rhs != 0
2616 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2618 enum rtx_code ncode = PLUS;
2620 if (lneg != rneg)
2622 ncode = MINUS;
2623 if (lneg)
2624 tem = lhs, lhs = rhs, rhs = tem;
2626 else if (swap_commutative_operands_p (lhs, rhs))
2627 tem = lhs, lhs = rhs, rhs = tem;
2629 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2631 /* Reject "simplifications" that just wrap the two
2632 arguments in a CONST. Failure to do so can result
2633 in infinite recursion with simplify_binary_operation
2634 when it calls us to simplify CONST operations. */
2635 if (tem
2636 && ! (GET_CODE (tem) == CONST
2637 && GET_CODE (XEXP (tem, 0)) == ncode
2638 && XEXP (XEXP (tem, 0), 0) == lhs
2639 && XEXP (XEXP (tem, 0), 1) == rhs)
2640 /* Don't allow -x + -1 -> ~x simplifications in the
2641 first pass. This allows us the chance to combine
2642 the -1 with other constants. */
2643 && ! (first
2644 && GET_CODE (tem) == NOT
2645 && XEXP (tem, 0) == rhs))
2647 lneg &= rneg;
2648 if (GET_CODE (tem) == NEG)
2649 tem = XEXP (tem, 0), lneg = !lneg;
2650 if (GET_CODE (tem) == CONST_INT && lneg)
2651 tem = neg_const_int (mode, tem), lneg = 0;
2653 ops[i].op = tem;
2654 ops[i].neg = lneg;
2655 ops[j].op = NULL_RTX;
2656 changed = 1;
2661 first = 0;
2663 while (changed);
2665 /* Pack all the operands to the lower-numbered entries. */
2666 for (i = 0, j = 0; j < n_ops; j++)
2667 if (ops[j].op)
2668 ops[i++] = ops[j];
2669 n_ops = i;
2671 /* Sort the operations based on swap_commutative_operands_p. */
2672 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2674 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2675 if (n_ops == 2
2676 && GET_CODE (ops[1].op) == CONST_INT
2677 && CONSTANT_P (ops[0].op)
2678 && ops[0].neg)
2679 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2681 /* We suppressed creation of trivial CONST expressions in the
2682 combination loop to avoid recursion. Create one manually now.
2683 The combination loop should have ensured that there is exactly
2684 one CONST_INT, and the sort will have ensured that it is last
2685 in the array and that any other constant will be next-to-last. */
2687 if (n_ops > 1
2688 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2689 && CONSTANT_P (ops[n_ops - 2].op))
2691 rtx value = ops[n_ops - 1].op;
2692 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2693 value = neg_const_int (mode, value);
2694 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2695 n_ops--;
2698 /* Count the number of CONSTs that we generated. */
2699 n_consts = 0;
2700 for (i = 0; i < n_ops; i++)
2701 if (GET_CODE (ops[i].op) == CONST)
2702 n_consts++;
2704 /* Give up if we didn't reduce the number of operands we had. Make
2705 sure we count a CONST as two operands. If we have the same
2706 number of operands, but have made more CONSTs than before, this
2707 is also an improvement, so accept it. */
2708 if (!force
2709 && (n_ops + n_consts > input_ops
2710 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2711 return NULL_RTX;
2713 /* Put a non-negated operand first, if possible. */
2715 for (i = 0; i < n_ops && ops[i].neg; i++)
2716 continue;
2717 if (i == n_ops)
2718 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2719 else if (i != 0)
2721 tem = ops[0].op;
2722 ops[0] = ops[i];
2723 ops[i].op = tem;
2724 ops[i].neg = 1;
2727 /* Now make the result by performing the requested operations. */
2728 result = ops[0].op;
2729 for (i = 1; i < n_ops; i++)
2730 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2731 mode, result, ops[i].op);
2733 return result;
2736 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2737 static bool
2738 plus_minus_operand_p (rtx x)
2740 return GET_CODE (x) == PLUS
2741 || GET_CODE (x) == MINUS
2742 || (GET_CODE (x) == CONST
2743 && GET_CODE (XEXP (x, 0)) == PLUS
2744 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2745 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2748 /* Like simplify_binary_operation except used for relational operators.
2749 MODE is the mode of the result. If MODE is VOIDmode, the operands
2750 must not both be VOIDmode as well.
2752 CMP_MODE specifies the mode in which the comparison is done, so it is
2753 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2754 the operands or, if both are VOIDmode, the operands are compared in
2755 "infinite precision". */
2757 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2758 enum machine_mode cmp_mode, rtx op0, rtx op1)
2760 rtx tem, trueop0, trueop1;
2762 if (cmp_mode == VOIDmode)
2763 cmp_mode = GET_MODE (op0);
2764 if (cmp_mode == VOIDmode)
2765 cmp_mode = GET_MODE (op1);
2767 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2768 if (tem)
2770 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2772 if (tem == const0_rtx)
2773 return CONST0_RTX (mode);
2774 #ifdef FLOAT_STORE_FLAG_VALUE
2776 REAL_VALUE_TYPE val;
2777 val = FLOAT_STORE_FLAG_VALUE (mode);
2778 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2780 #else
2781 return NULL_RTX;
2782 #endif
2784 if (VECTOR_MODE_P (mode))
2786 if (tem == const0_rtx)
2787 return CONST0_RTX (mode);
2788 #ifdef VECTOR_STORE_FLAG_VALUE
2790 int i, units;
2791 rtvec v;
2793 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2794 if (val == NULL_RTX)
2795 return NULL_RTX;
2796 if (val == const1_rtx)
2797 return CONST1_RTX (mode);
2799 units = GET_MODE_NUNITS (mode);
2800 v = rtvec_alloc (units);
2801 for (i = 0; i < units; i++)
2802 RTVEC_ELT (v, i) = val;
2803 return gen_rtx_raw_CONST_VECTOR (mode, v);
2805 #else
2806 return NULL_RTX;
2807 #endif
2810 return tem;
2813 /* For the following tests, ensure const0_rtx is op1. */
2814 if (swap_commutative_operands_p (op0, op1)
2815 || (op0 == const0_rtx && op1 != const0_rtx))
2816 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2818 /* If op0 is a compare, extract the comparison arguments from it. */
2819 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2820 return simplify_relational_operation (code, mode, VOIDmode,
2821 XEXP (op0, 0), XEXP (op0, 1));
2823 if (mode == VOIDmode
2824 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2825 || CC0_P (op0))
2826 return NULL_RTX;
2828 trueop0 = avoid_constant_pool_reference (op0);
2829 trueop1 = avoid_constant_pool_reference (op1);
2830 return simplify_relational_operation_1 (code, mode, cmp_mode,
2831 trueop0, trueop1);
2834 /* This part of simplify_relational_operation is only used when CMP_MODE
2835 is not in class MODE_CC (i.e. it is a real comparison).
2837 MODE is the mode of the result, while CMP_MODE specifies in which
2838 mode the comparison is done, so it is the mode of the operands. */
2840 static rtx
2841 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2842 enum machine_mode cmp_mode, rtx op0, rtx op1)
2844 enum rtx_code op0code = GET_CODE (op0);
2846 if (GET_CODE (op1) == CONST_INT)
2848 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2850 /* If op0 is a comparison, extract the comparison arguments from it. */
2851 if (code == NE)
2853 if (GET_MODE (op0) == cmp_mode)
2854 return simplify_rtx (op0);
2855 else
2856 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2857 XEXP (op0, 0), XEXP (op0, 1));
2859 else if (code == EQ)
2861 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2862 if (new_code != UNKNOWN)
2863 return simplify_gen_relational (new_code, mode, VOIDmode,
2864 XEXP (op0, 0), XEXP (op0, 1));
2869 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2870 if ((code == EQ || code == NE)
2871 && (op0code == PLUS || op0code == MINUS)
2872 && CONSTANT_P (op1)
2873 && CONSTANT_P (XEXP (op0, 1))
2874 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2876 rtx x = XEXP (op0, 0);
2877 rtx c = XEXP (op0, 1);
2879 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2880 cmp_mode, op1, c);
2881 return simplify_gen_relational (code, mode, cmp_mode, x, c);
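  /* For example, (eq (plus X (const_int 3)) (const_int 7)) becomes
     (eq X (const_int 4)), folding the two constants together.  */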
2884 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2885 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2886 if (code == NE
2887 && op1 == const0_rtx
2888 && GET_MODE_CLASS (mode) == MODE_INT
2889 && cmp_mode != VOIDmode
2890 /* ??? Work-around BImode bugs in the ia64 backend. */
2891 && mode != BImode
2892 && cmp_mode != BImode
2893 && nonzero_bits (op0, cmp_mode) == 1
2894 && STORE_FLAG_VALUE == 1)
2895 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2896 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2897 : lowpart_subreg (mode, op0, cmp_mode);
2899 return NULL_RTX;
2902 /* Check if the given comparison (done in the given MODE) is actually a
2903 tautology or a contradiction.
2904 If no simplification is possible, this function returns zero.
2905 Otherwise, it returns either const_true_rtx or const0_rtx. */
2908 simplify_const_relational_operation (enum rtx_code code,
2909 enum machine_mode mode,
2910 rtx op0, rtx op1)
2912 int equal, op0lt, op0ltu, op1lt, op1ltu;
2913 rtx tem;
2914 rtx trueop0;
2915 rtx trueop1;
2917 gcc_assert (mode != VOIDmode
2918 || (GET_MODE (op0) == VOIDmode
2919 && GET_MODE (op1) == VOIDmode));
2921 /* If op0 is a compare, extract the comparison arguments from it. */
2922 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2923 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2925 /* We can't simplify MODE_CC values since we don't know what the
2926 actual comparison is. */
2927 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2928 return 0;
2930 /* Make sure the constant is second. */
2931 if (swap_commutative_operands_p (op0, op1))
2933 tem = op0, op0 = op1, op1 = tem;
2934 code = swap_condition (code);
2937 trueop0 = avoid_constant_pool_reference (op0);
2938 trueop1 = avoid_constant_pool_reference (op1);
2940 /* For integer comparisons of A and B maybe we can simplify A - B and can
2941 then simplify a comparison of that with zero. If A and B are both either
2942 a register or a CONST_INT, this can't help; testing for these cases will
2943 prevent infinite recursion here and speed things up.
2945 If CODE is an unsigned comparison, then we can never do this optimization,
2946 because it gives an incorrect result if the subtraction wraps around zero.
2947 ANSI C defines unsigned operations such that they never overflow, and
2948 thus such cases cannot be ignored; but we cannot do it even for
2949 signed comparisons for languages such as Java, so test flag_wrapv. */
2951 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2952 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2953 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2954 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2955 /* We cannot do this for == or != if tem is a nonzero address. */
2956 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2957 && code != GTU && code != GEU && code != LTU && code != LEU)
2958 return simplify_const_relational_operation (signed_condition (code),
2959 mode, tem, const0_rtx);
2961 if (flag_unsafe_math_optimizations && code == ORDERED)
2962 return const_true_rtx;
2964 if (flag_unsafe_math_optimizations && code == UNORDERED)
2965 return const0_rtx;
2967 /* For modes without NaNs, if the two operands are equal, we know the
2968 result except if they have side-effects. */
2969 if (! HONOR_NANS (GET_MODE (trueop0))
2970 && rtx_equal_p (trueop0, trueop1)
2971 && ! side_effects_p (trueop0))
2972 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2974 /* If the operands are floating-point constants, see if we can fold
2975 the result. */
2976 else if (GET_CODE (trueop0) == CONST_DOUBLE
2977 && GET_CODE (trueop1) == CONST_DOUBLE
2978 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2980 REAL_VALUE_TYPE d0, d1;
2982 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2983 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2985 /* Comparisons are unordered iff at least one of the values is NaN. */
2986 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2987 switch (code)
2989 case UNEQ:
2990 case UNLT:
2991 case UNGT:
2992 case UNLE:
2993 case UNGE:
2994 case NE:
2995 case UNORDERED:
2996 return const_true_rtx;
2997 case EQ:
2998 case LT:
2999 case GT:
3000 case LE:
3001 case GE:
3002 case LTGT:
3003 case ORDERED:
3004 return const0_rtx;
3005 default:
3006 return 0;
3009 equal = REAL_VALUES_EQUAL (d0, d1);
3010 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3011 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3014 /* Otherwise, see if the operands are both integers. */
3015 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3016 && (GET_CODE (trueop0) == CONST_DOUBLE
3017 || GET_CODE (trueop0) == CONST_INT)
3018 && (GET_CODE (trueop1) == CONST_DOUBLE
3019 || GET_CODE (trueop1) == CONST_INT))
3021 int width = GET_MODE_BITSIZE (mode);
3022 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3023 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3025 /* Get the two words comprising each integer constant. */
3026 if (GET_CODE (trueop0) == CONST_DOUBLE)
3028 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3029 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3031 else
3033 l0u = l0s = INTVAL (trueop0);
3034 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3037 if (GET_CODE (trueop1) == CONST_DOUBLE)
3039 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3040 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3042 else
3044 l1u = l1s = INTVAL (trueop1);
3045 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3048 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3049 we have to sign or zero-extend the values. */
3050 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3052 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3053 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3055 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3056 l0s |= ((HOST_WIDE_INT) (-1) << width);
3058 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3059 l1s |= ((HOST_WIDE_INT) (-1) << width);
3061 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3062 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3064 equal = (h0u == h1u && l0u == l1u);
3065 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3066 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3067 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3068 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3071 /* Otherwise, there are some code-specific tests we can make. */
3072 else
3074 /* Optimize comparisons with upper and lower bounds. */
3075 if (SCALAR_INT_MODE_P (mode)
3076 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3078 rtx mmin, mmax;
3079 int sign;
3081 if (code == GEU
3082 || code == LEU
3083 || code == GTU
3084 || code == LTU)
3085 sign = 0;
3086 else
3087 sign = 1;
3089 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3091 tem = NULL_RTX;
3092 switch (code)
3094 case GEU:
3095 case GE:
3096 /* x >= min is always true. */
3097 if (rtx_equal_p (trueop1, mmin))
3098 tem = const_true_rtx;
3099 else
3100 break;
3102 case LEU:
3103 case LE:
3104 /* x <= max is always true. */
3105 if (rtx_equal_p (trueop1, mmax))
3106 tem = const_true_rtx;
3107 break;
3109 case GTU:
3110 case GT:
3111 /* x > max is always false. */
3112 if (rtx_equal_p (trueop1, mmax))
3113 tem = const0_rtx;
3114 break;
3116 case LTU:
3117 case LT:
3118 /* x < min is always false. */
3119 if (rtx_equal_p (trueop1, mmin))
3120 tem = const0_rtx;
3121 break;
3123 default:
3124 break;
3126 if (tem == const0_rtx
3127 || tem == const_true_rtx)
3128 return tem;
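          /* For example, (ltu X (const_int 0)) folds to const0_rtx and
             (geu X (const_int 0)) to const_true_rtx, since no value is
             unsigned-less than the mode's minimum of zero.  */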
3131 switch (code)
3133 case EQ:
3134 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3135 return const0_rtx;
3136 break;
3138 case NE:
3139 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3140 return const_true_rtx;
3141 break;
3143 case LT:
3144 /* Optimize abs(x) < 0.0. */
3145 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3147 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3148 : trueop0;
3149 if (GET_CODE (tem) == ABS)
3150 return const0_rtx;
3152 break;
3154 case GE:
3155 /* Optimize abs(x) >= 0.0. */
3156 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3158 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3159 : trueop0;
3160 if (GET_CODE (tem) == ABS)
3161 return const_true_rtx;
3163 break;
3165 case UNGE:
3166 /* Optimize ! (abs(x) < 0.0). */
3167 if (trueop1 == CONST0_RTX (mode))
3169 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3170 : trueop0;
3171 if (GET_CODE (tem) == ABS)
3172 return const_true_rtx;
3174 break;
3176 default:
3177 break;
3180 return 0;
3183 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3184 as appropriate. */
3185 switch (code)
3187 case EQ:
3188 case UNEQ:
3189 return equal ? const_true_rtx : const0_rtx;
3190 case NE:
3191 case LTGT:
3192 return ! equal ? const_true_rtx : const0_rtx;
3193 case LT:
3194 case UNLT:
3195 return op0lt ? const_true_rtx : const0_rtx;
3196 case GT:
3197 case UNGT:
3198 return op1lt ? const_true_rtx : const0_rtx;
3199 case LTU:
3200 return op0ltu ? const_true_rtx : const0_rtx;
3201 case GTU:
3202 return op1ltu ? const_true_rtx : const0_rtx;
3203 case LE:
3204 case UNLE:
3205 return equal || op0lt ? const_true_rtx : const0_rtx;
3206 case GE:
3207 case UNGE:
3208 return equal || op1lt ? const_true_rtx : const0_rtx;
3209 case LEU:
3210 return equal || op0ltu ? const_true_rtx : const0_rtx;
3211 case GEU:
3212 return equal || op1ltu ? const_true_rtx : const0_rtx;
3213 case ORDERED:
3214 return const_true_rtx;
3215 case UNORDERED:
3216 return const0_rtx;
3217 default:
3218 gcc_unreachable ();
3222 /* Simplify CODE, an operation with result mode MODE and three operands,
3223 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3224 a constant. Return 0 if no simplification is possible. */
3227 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3228 enum machine_mode op0_mode, rtx op0, rtx op1,
3229 rtx op2)
3231 unsigned int width = GET_MODE_BITSIZE (mode);
3233 /* VOIDmode means "infinite" precision. */
3234 if (width == 0)
3235 width = HOST_BITS_PER_WIDE_INT;
3237 switch (code)
3239 case SIGN_EXTRACT:
3240 case ZERO_EXTRACT:
3241 if (GET_CODE (op0) == CONST_INT
3242 && GET_CODE (op1) == CONST_INT
3243 && GET_CODE (op2) == CONST_INT
3244 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3245 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3247 /* Extracting a bit-field from a constant */
3248 HOST_WIDE_INT val = INTVAL (op0);
3250 if (BITS_BIG_ENDIAN)
3251 val >>= (GET_MODE_BITSIZE (op0_mode)
3252 - INTVAL (op2) - INTVAL (op1));
3253 else
3254 val >>= INTVAL (op2);
3256 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3258 /* First zero-extend. */
3259 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3260 /* If desired, propagate sign bit. */
3261 if (code == SIGN_EXTRACT
3262 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3263 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3266 /* Clear the bits that don't belong in our mode,
3267 unless they and our sign bit are all one.
3268 So we get either a reasonable negative value or a reasonable
3269 unsigned value for this mode. */
3270 if (width < HOST_BITS_PER_WIDE_INT
3271 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3272 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3273 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3275 return gen_int_mode (val, mode);
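          /* For example, on a !BITS_BIG_ENDIAN target
             (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 4))
             folds to (const_int 0xa): the value is shifted right by the
             bit position and masked down to the requested width.  */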
3277 break;
3279 case IF_THEN_ELSE:
3280 if (GET_CODE (op0) == CONST_INT)
3281 return op0 != const0_rtx ? op1 : op2;
3283 /* Convert c ? a : a into "a". */
3284 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3285 return op1;
3287 /* Convert a != b ? a : b into "a". */
3288 if (GET_CODE (op0) == NE
3289 && ! side_effects_p (op0)
3290 && ! HONOR_NANS (mode)
3291 && ! HONOR_SIGNED_ZEROS (mode)
3292 && ((rtx_equal_p (XEXP (op0, 0), op1)
3293 && rtx_equal_p (XEXP (op0, 1), op2))
3294 || (rtx_equal_p (XEXP (op0, 0), op2)
3295 && rtx_equal_p (XEXP (op0, 1), op1))))
3296 return op1;
3298 /* Convert a == b ? a : b into "b". */
3299 if (GET_CODE (op0) == EQ
3300 && ! side_effects_p (op0)
3301 && ! HONOR_NANS (mode)
3302 && ! HONOR_SIGNED_ZEROS (mode)
3303 && ((rtx_equal_p (XEXP (op0, 0), op1)
3304 && rtx_equal_p (XEXP (op0, 1), op2))
3305 || (rtx_equal_p (XEXP (op0, 0), op2)
3306 && rtx_equal_p (XEXP (op0, 1), op1))))
3307 return op2;
3309 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3311 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3312 ? GET_MODE (XEXP (op0, 1))
3313 : GET_MODE (XEXP (op0, 0)));
3314 rtx temp;
3316 /* Look for happy constants in op1 and op2. */
3317 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3319 HOST_WIDE_INT t = INTVAL (op1);
3320 HOST_WIDE_INT f = INTVAL (op2);
3322 if (t == STORE_FLAG_VALUE && f == 0)
3323 code = GET_CODE (op0);
3324 else if (t == 0 && f == STORE_FLAG_VALUE)
3326 enum rtx_code tmp;
3327 tmp = reversed_comparison_code (op0, NULL_RTX);
3328 if (tmp == UNKNOWN)
3329 break;
3330 code = tmp;
3332 else
3333 break;
3335 return simplify_gen_relational (code, mode, cmp_mode,
3336 XEXP (op0, 0), XEXP (op0, 1));
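              /* For example, when STORE_FLAG_VALUE == 1,
                 (if_then_else (lt X Y) (const_int 1) (const_int 0))
                 simplifies to the comparison (lt X Y) itself, and with
                 the arms swapped it becomes the reversed comparison,
                 provided the reversal is known to be safe.  */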
3339 if (cmp_mode == VOIDmode)
3340 cmp_mode = op0_mode;
3341 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3342 cmp_mode, XEXP (op0, 0),
3343 XEXP (op0, 1));
3345 /* See if any simplifications were possible. */
3346 if (temp)
3348 if (GET_CODE (temp) == CONST_INT)
3349 return temp == const0_rtx ? op2 : op1;
3350 else if (temp)
3351 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3354 break;
3356 case VEC_MERGE:
3357 gcc_assert (GET_MODE (op0) == mode);
3358 gcc_assert (GET_MODE (op1) == mode);
3359 gcc_assert (VECTOR_MODE_P (mode));
3360 op2 = avoid_constant_pool_reference (op2);
3361 if (GET_CODE (op2) == CONST_INT)
3363 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3364 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3365 int mask = (1 << n_elts) - 1;
3367 if (!(INTVAL (op2) & mask))
3368 return op1;
3369 if ((INTVAL (op2) & mask) == mask)
3370 return op0;
3372 op0 = avoid_constant_pool_reference (op0);
3373 op1 = avoid_constant_pool_reference (op1);
3374 if (GET_CODE (op0) == CONST_VECTOR
3375 && GET_CODE (op1) == CONST_VECTOR)
3377 rtvec v = rtvec_alloc (n_elts);
3378 unsigned int i;
3380 for (i = 0; i < n_elts; i++)
3381 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3382 ? CONST_VECTOR_ELT (op0, i)
3383 : CONST_VECTOR_ELT (op1, i));
3384 return gen_rtx_CONST_VECTOR (mode, v);
3387 break;
3389 default:
3390 gcc_unreachable ();
3393 return 0;
3396 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3397 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3399 Works by unpacking OP into a collection of 8-bit values
3400 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3401 and then repacking them again for OUTERMODE. */
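/* For instance, an SImode (const_int 0x12345678) unpacks into the byte
   array { 0x78, 0x56, 0x34, 0x12, ... }; a QImode lowpart SUBREG then
   selects the first byte and repacks it as (const_int 0x78).  */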
3403 static rtx
3404 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3405 enum machine_mode innermode, unsigned int byte)
3407 /* We support up to 512-bit values (for V8DFmode). */
3408 enum {
3409 max_bitsize = 512,
3410 value_bit = 8,
3411 value_mask = (1 << value_bit) - 1
3413 unsigned char value[max_bitsize / value_bit];
3414 int value_start;
3415 int i;
3416 int elem;
3418 int num_elem;
3419 rtx * elems;
3420 int elem_bitsize;
3421 rtx result_s;
3422 rtvec result_v = NULL;
3423 enum mode_class outer_class;
3424 enum machine_mode outer_submode;
3426 /* Some ports misuse CCmode. */
3427 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3428 return op;
3430 /* We have no way to represent a complex constant at the rtl level. */
3431 if (COMPLEX_MODE_P (outermode))
3432 return NULL_RTX;
3434 /* Unpack the value. */
3436 if (GET_CODE (op) == CONST_VECTOR)
3438 num_elem = CONST_VECTOR_NUNITS (op);
3439 elems = &CONST_VECTOR_ELT (op, 0);
3440 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3442 else
3444 num_elem = 1;
3445 elems = &op;
3446 elem_bitsize = max_bitsize;
3448 /* If this asserts, it is too complicated; reducing value_bit may help. */
3449 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3450 /* I don't know how to handle endianness of sub-units. */
3451 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3453 for (elem = 0; elem < num_elem; elem++)
3455 unsigned char * vp;
3456 rtx el = elems[elem];
3458 /* Vectors are kept in target memory order. (This is probably
3459 a mistake.) */
3461 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3462 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3463 / BITS_PER_UNIT);
3464 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3465 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3466 unsigned bytele = (subword_byte % UNITS_PER_WORD
3467 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3468 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3471 switch (GET_CODE (el))
3473 case CONST_INT:
3474 for (i = 0;
3475 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3476 i += value_bit)
3477 *vp++ = INTVAL (el) >> i;
3478 /* CONST_INTs are always logically sign-extended. */
3479 for (; i < elem_bitsize; i += value_bit)
3480 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3481 break;
3483 case CONST_DOUBLE:
3484 if (GET_MODE (el) == VOIDmode)
3486 /* If this triggers, someone should have generated a
3487 CONST_INT instead. */
3488 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3490 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3491 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3492 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3494 *vp++
3495 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3496 i += value_bit;
3498 /* It shouldn't matter what's done here, so fill it with
3499 zero. */
3500 for (; i < max_bitsize; i += value_bit)
3501 *vp++ = 0;
3503 else
3505 long tmp[max_bitsize / 32];
3506 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3508 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3509 gcc_assert (bitsize <= elem_bitsize);
3510 gcc_assert (bitsize % value_bit == 0);
3512 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3513 GET_MODE (el));
3515 /* real_to_target produces its result in words affected by
3516 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3517 and use WORDS_BIG_ENDIAN instead; see the documentation
3518 of SUBREG in rtl.texi. */
3519 for (i = 0; i < bitsize; i += value_bit)
3521 int ibase;
3522 if (WORDS_BIG_ENDIAN)
3523 ibase = bitsize - 1 - i;
3524 else
3525 ibase = i;
3526 *vp++ = tmp[ibase / 32] >> i % 32;
3529 /* It shouldn't matter what's done here, so fill it with
3530 zero. */
3531 for (; i < elem_bitsize; i += value_bit)
3532 *vp++ = 0;
3534 break;
3536 default:
3537 gcc_unreachable ();
3541 /* Now, pick the right byte to start with. */
3542 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3543 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3544 will already have offset 0. */
3545 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3547 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3548 - byte);
3549 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3550 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3551 byte = (subword_byte % UNITS_PER_WORD
3552 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3555 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3556 so if it's become negative it will instead be very large.) */
3557 gcc_assert (byte < GET_MODE_SIZE (innermode));
3559 /* Convert from bytes to chunks of size value_bit. */
3560 value_start = byte * (BITS_PER_UNIT / value_bit);
3562 /* Re-pack the value. */
3564 if (VECTOR_MODE_P (outermode))
3566 num_elem = GET_MODE_NUNITS (outermode);
3567 result_v = rtvec_alloc (num_elem);
3568 elems = &RTVEC_ELT (result_v, 0);
3569 outer_submode = GET_MODE_INNER (outermode);
3571 else
3573 num_elem = 1;
3574 elems = &result_s;
3575 outer_submode = outermode;
3578 outer_class = GET_MODE_CLASS (outer_submode);
3579 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3581 gcc_assert (elem_bitsize % value_bit == 0);
3582 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3584 for (elem = 0; elem < num_elem; elem++)
3586 unsigned char *vp;
3588 /* Vectors are stored in target memory order. (This is probably
3589 a mistake.) */
3591 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3592 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3593 / BITS_PER_UNIT);
3594 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3595 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3596 unsigned bytele = (subword_byte % UNITS_PER_WORD
3597 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3598 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3601 switch (outer_class)
3603 case MODE_INT:
3604 case MODE_PARTIAL_INT:
3606 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3608 for (i = 0;
3609 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3610 i += value_bit)
3611 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3612 for (; i < elem_bitsize; i += value_bit)
3613 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3614 << (i - HOST_BITS_PER_WIDE_INT));
3616 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3617 know why. */
3618 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3619 elems[elem] = gen_int_mode (lo, outer_submode);
3620 else
3621 elems[elem] = immed_double_const (lo, hi, outer_submode);
3623 break;
3625 case MODE_FLOAT:
3627 REAL_VALUE_TYPE r;
3628 long tmp[max_bitsize / 32];
3630 /* real_from_target wants its input in words affected by
3631 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3632 and use WORDS_BIG_ENDIAN instead; see the documentation
3633 of SUBREG in rtl.texi. */
3634 for (i = 0; i < max_bitsize / 32; i++)
3635 tmp[i] = 0;
3636 for (i = 0; i < elem_bitsize; i += value_bit)
3638 int ibase;
3639 if (WORDS_BIG_ENDIAN)
3640 ibase = elem_bitsize - 1 - i;
3641 else
3642 ibase = i;
3643 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3646 real_from_target (&r, tmp, outer_submode);
3647 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3649 break;
3651 default:
3652 gcc_unreachable ();
3655 if (VECTOR_MODE_P (outermode))
3656 return gen_rtx_CONST_VECTOR (outermode, result_v);
3657 else
3658 return result_s;
3661 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3662 Return 0 if no simplifications are possible. */
3664 simplify_subreg (enum machine_mode outermode, rtx op,
3665 enum machine_mode innermode, unsigned int byte)
3667 /* Little bit of sanity checking. */
3668 gcc_assert (innermode != VOIDmode);
3669 gcc_assert (outermode != VOIDmode);
3670 gcc_assert (innermode != BLKmode);
3671 gcc_assert (outermode != BLKmode);
3673 gcc_assert (GET_MODE (op) == innermode
3674 || GET_MODE (op) == VOIDmode);
3676 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3677 gcc_assert (byte < GET_MODE_SIZE (innermode));
3679 if (outermode == innermode && !byte)
3680 return op;
3682 if (GET_CODE (op) == CONST_INT
3683 || GET_CODE (op) == CONST_DOUBLE
3684 || GET_CODE (op) == CONST_VECTOR)
3685 return simplify_immed_subreg (outermode, op, innermode, byte);
3687 /* Changing mode twice with SUBREG => just change it once,
3688 or not at all if changing back to OP's starting mode. */
3689 if (GET_CODE (op) == SUBREG)
3691 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3692 int final_offset = byte + SUBREG_BYTE (op);
3693 rtx newx;
3695 if (outermode == innermostmode
3696 && byte == 0 && SUBREG_BYTE (op) == 0)
3697 return SUBREG_REG (op);
3699 /* The SUBREG_BYTE represents the offset, as if the value were stored
3700 in memory. The irritating exception is a paradoxical subreg, where
3701 we define SUBREG_BYTE to be 0; on big-endian machines, this
3702 value ought to be negative. For a moment, undo this exception. */
3703 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3705 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3706 if (WORDS_BIG_ENDIAN)
3707 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3708 if (BYTES_BIG_ENDIAN)
3709 final_offset += difference % UNITS_PER_WORD;
3711 if (SUBREG_BYTE (op) == 0
3712 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3714 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3715 if (WORDS_BIG_ENDIAN)
3716 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3717 if (BYTES_BIG_ENDIAN)
3718 final_offset += difference % UNITS_PER_WORD;
3721 /* See whether resulting subreg will be paradoxical. */
3722 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3724 /* In nonparadoxical subregs we can't handle negative offsets. */
3725 if (final_offset < 0)
3726 return NULL_RTX;
3727 /* Bail out in case resulting subreg would be incorrect. */
3728 if (final_offset % GET_MODE_SIZE (outermode)
3729 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3730 return NULL_RTX;
3732 else
3734 int offset = 0;
3735 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3737 /* In a paradoxical subreg, see if we are still looking at the lower part.
3738 If so, our SUBREG_BYTE will be 0. */
3739 if (WORDS_BIG_ENDIAN)
3740 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3741 if (BYTES_BIG_ENDIAN)
3742 offset += difference % UNITS_PER_WORD;
3743 if (offset == final_offset)
3744 final_offset = 0;
3745 else
3746 return NULL_RTX;
3749 /* Recurse for further possible simplifications. */
3750 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3751 final_offset);
3752 if (newx)
3753 return newx;
3754 if (validate_subreg (outermode, innermostmode,
3755 SUBREG_REG (op), final_offset))
3756 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3757 return NULL_RTX;
3760 /* SUBREG of a hard register => just change the register number
3761 and/or mode. If the hard register is not valid in that mode,
3762 suppress this simplification. If the hard register is the stack,
3763 frame, or argument pointer, leave this as a SUBREG. */
3765 if (REG_P (op)
3766 && REGNO (op) < FIRST_PSEUDO_REGISTER
3767 #ifdef CANNOT_CHANGE_MODE_CLASS
3768 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3769 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3770 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3771 #endif
3772 && ((reload_completed && !frame_pointer_needed)
3773 || (REGNO (op) != FRAME_POINTER_REGNUM
3774 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3775 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3776 #endif
3778 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3779 && REGNO (op) != ARG_POINTER_REGNUM
3780 #endif
3781 && REGNO (op) != STACK_POINTER_REGNUM
3782 && subreg_offset_representable_p (REGNO (op), innermode,
3783 byte, outermode))
3785 unsigned int regno = REGNO (op);
3786 unsigned int final_regno
3787 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3789 /* ??? We do allow it if the current REG is not valid for
3790 its mode. This is a kludge to work around how float/complex
3791 arguments are passed on 32-bit SPARC and should be fixed. */
3792 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3793 || ! HARD_REGNO_MODE_OK (regno, innermode))
3795 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3797 /* Propagate original regno. We don't have any way to specify
3798 the offset inside original regno, so do so only for lowpart.
3799 The information is used only by alias analysis, which cannot
3800 grok partial registers anyway.
3802 if (subreg_lowpart_offset (outermode, innermode) == byte)
3803 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3804 return x;
3808 /* If we have a SUBREG of a register that we are replacing and we are
3809 replacing it with a MEM, make a new MEM and try replacing the
3810 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3811 or if we would be widening it. */
3813 if (MEM_P (op)
3814 && ! mode_dependent_address_p (XEXP (op, 0))
3815 /* Allow splitting of volatile memory references in case we don't
3816 have an instruction to move the whole thing. */
3817 && (! MEM_VOLATILE_P (op)
3818 || ! have_insn_for (SET, innermode))
3819 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3820 return adjust_address_nv (op, outermode, byte);
3822 /* Handle complex values represented as CONCAT
3823 of real and imaginary part. */
3824 if (GET_CODE (op) == CONCAT)
3826 unsigned int inner_size, final_offset;
3827 rtx part, res;
3829 inner_size = GET_MODE_UNIT_SIZE (innermode);
3830 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3831 final_offset = byte % inner_size;
3832 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3833 return NULL_RTX;
3835 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3836 if (res)
3837 return res;
3838 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3839 return gen_rtx_SUBREG (outermode, part, final_offset);
3840 return NULL_RTX;
3843 /* Optimize SUBREG truncations of zero and sign extended values. */
3844 if ((GET_CODE (op) == ZERO_EXTEND
3845 || GET_CODE (op) == SIGN_EXTEND)
3846 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3848 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3850 /* If we're requesting the lowpart of a zero or sign extension,
3851 there are three possibilities. If the outermode is the same
3852 as the origmode, we can omit both the extension and the subreg.
3853 If the outermode is not larger than the origmode, we can apply
3854 the truncation without the extension. Finally, if the outermode
3855 is larger than the origmode, but both are integer modes, we
3856 can just extend to the appropriate mode. */
3857 if (bitpos == 0)
3859 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3860 if (outermode == origmode)
3861 return XEXP (op, 0);
3862 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3863 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3864 subreg_lowpart_offset (outermode,
3865 origmode));
3866 if (SCALAR_INT_MODE_P (outermode))
3867 return simplify_gen_unary (GET_CODE (op), outermode,
3868 XEXP (op, 0), origmode);
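      /* For example, the QImode lowpart of (zero_extend:SI (x:QI)) is
         just X itself, while the HImode lowpart of the same expression
         becomes (zero_extend:HI (x:QI)).  */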
3871 /* A SUBREG resulting from a zero extension may fold to zero if
3872 it extracts higher bits than the ZERO_EXTEND's source provides. */
3873 if (GET_CODE (op) == ZERO_EXTEND
3874 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3875 return CONST0_RTX (outermode);
3878 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
3879 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3880 the outer subreg is effectively a truncation to the original mode. */
3881 if ((GET_CODE (op) == LSHIFTRT
3882 || GET_CODE (op) == ASHIFTRT)
3883 && SCALAR_INT_MODE_P (outermode)
3884 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
3885 to avoid the possibility that an outer LSHIFTRT shifts by more
3886 than the sign extension's sign_bit_copies and introduces zeros
3887 into the high bits of the result. */
3888 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3889 && GET_CODE (XEXP (op, 1)) == CONST_INT
3890 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3891 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3892 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3893 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3894 return simplify_gen_binary (ASHIFTRT, outermode,
3895 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3897 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
3898 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3899 the outer subreg is effectively a truncation to the original mode. */
3900 if ((GET_CODE (op) == LSHIFTRT
3901 || GET_CODE (op) == ASHIFTRT)
3902 && SCALAR_INT_MODE_P (outermode)
3903 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3904 && GET_CODE (XEXP (op, 1)) == CONST_INT
3905 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3906 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3907 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3908 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3909 return simplify_gen_binary (LSHIFTRT, outermode,
3910 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
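      /* Added note (not in the original source): the zero_extend form needs
         no "twice as wide" restriction, because the bits shifted down from
         above the original width are already zero; for instance
           (subreg:HI (lshiftrt:SI (zero_extend:SI (x:HI)) (const_int 5)) ...)
         at the low part becomes (lshiftrt:HI (x:HI) (const_int 5)).  */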
3912 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
 3913 (ashift:QI (x:QI) C), where C is a suitable small constant and
3914 the outer subreg is effectively a truncation to the original mode. */
3915 if (GET_CODE (op) == ASHIFT
3916 && SCALAR_INT_MODE_P (outermode)
3917 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3918 && GET_CODE (XEXP (op, 1)) == CONST_INT
3919 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3920 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3921 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3922 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3923 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3924 return simplify_gen_binary (ASHIFT, outermode,
3925 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
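      /* Added example (not in the original source):
           (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) (const_int 2)) ...)
         at the low part becomes (ashift:QI (x:QI) (const_int 2)); a left
         shift only moves bits upward, so the low 8 bits of the wide result
         depend only on the low bits of x.  */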
3927 return NULL_RTX;
3930 /* Make a SUBREG operation or equivalent if it folds. */
 3932 rtx
 3933 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3934 enum machine_mode innermode, unsigned int byte)
3936 rtx newx;
3938 newx = simplify_subreg (outermode, op, innermode, byte);
3939 if (newx)
3940 return newx;
3942 if (GET_CODE (op) == SUBREG
3943 || GET_CODE (op) == CONCAT
3944 || GET_MODE (op) == VOIDmode)
3945 return NULL_RTX;
3947 if (validate_subreg (outermode, innermode, op, byte))
3948 return gen_rtx_SUBREG (outermode, op, byte);
3950 return NULL_RTX;
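   /* Added usage note (not in the original source): this entry point is for
      callers that want a SUBREG whether or not it simplifies, e.g.
        simplify_gen_subreg (QImode, op, SImode,
                             subreg_lowpart_offset (QImode, SImode))
      returns the folded rtx when simplify_subreg succeeds (for instance a
      CONST_INT when OP is constant), otherwise a fresh (subreg:QI ...) when
      validate_subreg accepts it, and NULL_RTX when neither is possible.  */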
3953 /* Simplify X, an rtx expression.
3955 Return the simplified expression or NULL if no simplifications
3956 were possible.
3958 This is the preferred entry point into the simplification routines;
3959 however, we still allow passes to call the more specific routines.
3961 Right now GCC has three (yes, three) major bodies of RTL simplification
3962 code that need to be unified.
3964 1. fold_rtx in cse.c. This code uses various CSE specific
3965 information to aid in RTL simplification.
3967 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3968 it uses combine specific information to aid in RTL
3969 simplification.
3971 3. The routines in this file.
3974 Long term we want to only have one body of simplification code; to
3975 get to that state I recommend the following steps:
 3977 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3978 which are not pass dependent state into these routines.
3980 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3981 use this routine whenever possible.
3983 3. Allow for pass dependent state to be provided to these
3984 routines and add simplifications based on the pass dependent
3985 state. Remove code from cse.c & combine.c that becomes
3986 redundant/dead.
3988 It will take time, but ultimately the compiler will be easier to
3989 maintain and improve. It's totally silly that when we add a
 3990 simplification it needs to be added to 4 places (3 for RTL
 3991 simplification and 1 for tree simplification).  */
 3993 rtx
 3994 simplify_rtx (rtx x)
3996 enum rtx_code code = GET_CODE (x);
3997 enum machine_mode mode = GET_MODE (x);
3999 switch (GET_RTX_CLASS (code))
4001 case RTX_UNARY:
4002 return simplify_unary_operation (code, mode,
4003 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4004 case RTX_COMM_ARITH:
4005 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4006 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4008 /* Fall through.... */
4010 case RTX_BIN_ARITH:
4011 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4013 case RTX_TERNARY:
4014 case RTX_BITFIELD_OPS:
4015 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4016 XEXP (x, 0), XEXP (x, 1),
4017 XEXP (x, 2));
4019 case RTX_COMPARE:
4020 case RTX_COMM_COMPARE:
4021 return simplify_relational_operation (code, mode,
4022 ((GET_MODE (XEXP (x, 0))
4023 != VOIDmode)
4024 ? GET_MODE (XEXP (x, 0))
4025 : GET_MODE (XEXP (x, 1))),
4026 XEXP (x, 0),
4027 XEXP (x, 1));
4029 case RTX_EXTRA:
4030 if (code == SUBREG)
4031 return simplify_gen_subreg (mode, SUBREG_REG (x),
4032 GET_MODE (SUBREG_REG (x)),
4033 SUBREG_BYTE (x));
4034 break;
4036 case RTX_OBJ:
4037 if (code == LO_SUM)
4039 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4040 if (GET_CODE (XEXP (x, 0)) == HIGH
4041 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4042 return XEXP (x, 1);
4044 break;
4046 default:
4047 break;
4049 return NULL;
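   /* Added example (not in the original source): for
      (plus:SI (const_int 2) (const_int 3)) the RTX_COMM_ARITH case falls
      through to simplify_binary_operation, which folds the expression to
      (const_int 5); when no simplification applies, NULL is returned.  */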