gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
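/* Illustrative sketch (not part of the original source): how a (low, high)
   pair is extended.  Assuming a 64-bit HOST_WIDE_INT:

     unsigned HOST_WIDE_INT low  = (unsigned HOST_WIDE_INT) -5;
     HOST_WIDE_INT          high = HWI_SIGN_EXTEND (low);
     // high is -1 (all ones); the pair (low, high) denotes -5
     // as a double-width value.

     low  = 5;
     high = HWI_SIGN_EXTEND (low);
     // high is 0; the pair denotes 5.  */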
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 rtx, int);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
64 /* Negate a CONST_INT rtx, truncating (because a conversion from a
65 maximally negative number can overflow). */
66 static rtx
67 neg_const_int (enum machine_mode mode, rtx i)
69 return gen_int_mode (- INTVAL (i), mode);
72 /* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
75 bool
76 mode_signbit_p (enum machine_mode mode, rtx x)
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
81 if (GET_MODE_CLASS (mode) != MODE_INT)
82 return false;
84 width = GET_MODE_BITSIZE (mode);
85 if (width == 0)
86 return false;
88 if (width <= HOST_BITS_PER_WIDE_INT
89 && GET_CODE (x) == CONST_INT)
90 val = INTVAL (x);
91 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_DOUBLE
93 && CONST_DOUBLE_LOW (x) == 0)
95 val = CONST_DOUBLE_HIGH (x);
96 width -= HOST_BITS_PER_WIDE_INT;
98 else
99 return false;
101 if (width < HOST_BITS_PER_WIDE_INT)
102 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
103 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
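/* Usage sketch (illustrative, not part of the original source): for a
   32-bit integer mode this accepts exactly the constant whose only set
   bit is the sign bit, e.g.

     mode_signbit_p (SImode, gen_int_mode ((HOST_WIDE_INT) 1 << 31, SImode))
       -> true
     mode_signbit_p (SImode, const1_rtx)
       -> false

   The XOR case of simplify_binary_operation below uses this to
   canonicalize (xor X signbit) as (plus X signbit).  */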
106 /* Make a binary operation by properly ordering the operands and
107 seeing if the expression folds. */
110 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
111 rtx op1)
113 rtx tem;
115 /* Put complex operands first and constants second if commutative. */
116 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
117 && swap_commutative_operands_p (op0, op1))
118 tem = op0, op0 = op1, op1 = tem;
120 /* If this simplifies, do it. */
121 tem = simplify_binary_operation (code, mode, op0, op1);
122 if (tem)
123 return tem;
125 /* Handle addition and subtraction specially. Otherwise, just form
126 the operation. */
128 if (code == PLUS || code == MINUS)
130 tem = simplify_plus_minus (code, mode, op0, op1, 1);
131 if (tem)
132 return tem;
135 return gen_rtx_fmt_ee (code, mode, op0, op1);
138 /* If X is a MEM referencing the constant pool, return the real value.
139 Otherwise return X. */
141 avoid_constant_pool_reference (rtx x)
143 rtx c, tmp, addr;
144 enum machine_mode cmode;
146 switch (GET_CODE (x))
148 case MEM:
149 break;
151 case FLOAT_EXTEND:
152 /* Handle float extensions of constant pool references. */
153 tmp = XEXP (x, 0);
154 c = avoid_constant_pool_reference (tmp);
155 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
157 REAL_VALUE_TYPE d;
159 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
160 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
162 return x;
164 default:
165 return x;
168 addr = XEXP (x, 0);
170 /* Call target hook to avoid the effects of -fpic etc.... */
171 addr = targetm.delegitimize_address (addr);
173 if (GET_CODE (addr) == LO_SUM)
174 addr = XEXP (addr, 1);
176 if (GET_CODE (addr) != SYMBOL_REF
177 || ! CONSTANT_POOL_ADDRESS_P (addr))
178 return x;
180 c = get_pool_constant (addr);
181 cmode = get_pool_mode (addr);
183 /* If we're accessing the constant in a different mode than it was
184 originally stored, attempt to fix that up via subreg simplifications.
185 If that fails we have no choice but to return the original memory. */
186 if (cmode != GET_MODE (x))
188 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
189 return c ? c : x;
192 return c;
195 /* Make a unary operation by first seeing if it folds and otherwise making
196 the specified operation. */
199 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
200 enum machine_mode op_mode)
202 rtx tem;
204 /* If this simplifies, use it. */
205 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
206 return tem;
208 return gen_rtx_fmt_e (code, mode, op);
211 /* Likewise for ternary operations. */
214 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
215 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
217 rtx tem;
219 /* If this simplifies, use it. */
220 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
221 op0, op1, op2)))
222 return tem;
224 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
227 /* Likewise, for relational operations.
228 CMP_MODE specifies mode comparison is done in. */
231 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
232 enum machine_mode cmp_mode, rtx op0, rtx op1)
234 rtx tem;
236 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
237 op0, op1)))
238 return tem;
240 return gen_rtx_fmt_ee (code, mode, op0, op1);
243 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
244 resulting RTX. Return a new RTX which is as simplified as possible. */
247 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
249 enum rtx_code code = GET_CODE (x);
250 enum machine_mode mode = GET_MODE (x);
251 enum machine_mode op_mode;
252 rtx op0, op1, op2;
254 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
255 to build a new expression substituting recursively. If we can't do
256 anything, return our input. */
258 if (x == old_rtx)
259 return new_rtx;
261 switch (GET_RTX_CLASS (code))
263 case RTX_UNARY:
264 op0 = XEXP (x, 0);
265 op_mode = GET_MODE (op0);
266 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
267 if (op0 == XEXP (x, 0))
268 return x;
269 return simplify_gen_unary (code, mode, op0, op_mode);
271 case RTX_BIN_ARITH:
272 case RTX_COMM_ARITH:
273 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
274 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
275 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
276 return x;
277 return simplify_gen_binary (code, mode, op0, op1);
279 case RTX_COMPARE:
280 case RTX_COMM_COMPARE:
281 op0 = XEXP (x, 0);
282 op1 = XEXP (x, 1);
283 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
284 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
285 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
287 return x;
288 return simplify_gen_relational (code, mode, op_mode, op0, op1);
290 case RTX_TERNARY:
291 case RTX_BITFIELD_OPS:
292 op0 = XEXP (x, 0);
293 op_mode = GET_MODE (op0);
294 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
295 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
296 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
297 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
298 return x;
299 if (op_mode == VOIDmode)
300 op_mode = GET_MODE (op0);
301 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
303 case RTX_EXTRA:
304 /* The only case we try to handle is a SUBREG. */
305 if (code == SUBREG)
307 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
308 if (op0 == SUBREG_REG (x))
309 return x;
310 op0 = simplify_gen_subreg (GET_MODE (x), op0,
311 GET_MODE (SUBREG_REG (x)),
312 SUBREG_BYTE (x));
313 return op0 ? op0 : x;
315 break;
317 case RTX_OBJ:
318 if (code == MEM)
320 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
321 if (op0 == XEXP (x, 0))
322 return x;
323 return replace_equiv_address_nv (x, op0);
325 else if (code == LO_SUM)
327 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
328 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
330 /* (lo_sum (high x) x) -> x */
331 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
332 return op1;
334 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
335 return x;
336 return gen_rtx_LO_SUM (mode, op0, op1);
338 else if (code == REG)
340 if (rtx_equal_p (x, old_rtx))
341 return new_rtx;
343 break;
345 default:
346 break;
348 return x;
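/* Usage sketch (illustrative, not part of the original source):
   substituting a known constant for a register and letting the result
   fold.  Assuming a hypothetical pseudo register:

     rtx reg = gen_rtx_REG (SImode, 100);
     rtx x   = gen_rtx_PLUS (SImode, reg, const1_rtx);
     rtx y   = simplify_replace_rtx (x, reg, GEN_INT (41));
     // y is (const_int 42): the PLUS is rebuilt through
     // simplify_gen_binary, which folds the two constants.

   REG nodes are matched with rtx_equal_p, so a distinct but equal
   (reg:SI 100) is replaced as well.  */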
351 /* Try to simplify a unary operation CODE whose output mode is to be
352 MODE with input operand OP whose mode was originally OP_MODE.
353 Return zero if no simplification can be made. */
355 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
356 rtx op, enum machine_mode op_mode)
358 unsigned int width = GET_MODE_BITSIZE (mode);
359 rtx trueop = avoid_constant_pool_reference (op);
361 if (code == VEC_DUPLICATE)
363 gcc_assert (VECTOR_MODE_P (mode));
364 if (GET_MODE (trueop) != VOIDmode)
366 if (!VECTOR_MODE_P (GET_MODE (trueop)))
367 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
368 else
369 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
370 (GET_MODE (trueop)));
372 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
373 || GET_CODE (trueop) == CONST_VECTOR)
375 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
376 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
377 rtvec v = rtvec_alloc (n_elts);
378 unsigned int i;
380 if (GET_CODE (trueop) != CONST_VECTOR)
381 for (i = 0; i < n_elts; i++)
382 RTVEC_ELT (v, i) = trueop;
383 else
385 enum machine_mode inmode = GET_MODE (trueop);
386 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
387 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
389 gcc_assert (in_n_elts < n_elts);
390 gcc_assert ((n_elts % in_n_elts) == 0);
391 for (i = 0; i < n_elts; i++)
392 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
394 return gen_rtx_CONST_VECTOR (mode, v);
397 else if (GET_CODE (op) == CONST)
398 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
400 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
402 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
403 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
404 enum machine_mode opmode = GET_MODE (trueop);
405 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
406 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
407 rtvec v = rtvec_alloc (n_elts);
408 unsigned int i;
410 gcc_assert (op_n_elts == n_elts);
411 for (i = 0; i < n_elts; i++)
413 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
414 CONST_VECTOR_ELT (trueop, i),
415 GET_MODE_INNER (opmode));
416 if (!x)
417 return 0;
418 RTVEC_ELT (v, i) = x;
420 return gen_rtx_CONST_VECTOR (mode, v);
423 /* The order of these tests is critical so that, for example, we don't
424 check the wrong mode (input vs. output) for a conversion operation,
425 such as FIX. At some point, this should be simplified. */
427 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
428 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
430 HOST_WIDE_INT hv, lv;
431 REAL_VALUE_TYPE d;
433 if (GET_CODE (trueop) == CONST_INT)
434 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
435 else
436 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
438 REAL_VALUE_FROM_INT (d, lv, hv, mode);
439 d = real_value_truncate (mode, d);
440 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
442 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
443 && (GET_CODE (trueop) == CONST_DOUBLE
444 || GET_CODE (trueop) == CONST_INT))
446 HOST_WIDE_INT hv, lv;
447 REAL_VALUE_TYPE d;
449 if (GET_CODE (trueop) == CONST_INT)
450 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
451 else
452 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
454 if (op_mode == VOIDmode)
456 /* We don't know how to interpret negative-looking numbers in
457 this case, so don't try to fold those. */
458 if (hv < 0)
459 return 0;
 461       else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
 462         ;
463 else
464 hv = 0, lv &= GET_MODE_MASK (op_mode);
466 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
467 d = real_value_truncate (mode, d);
468 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
471 if (GET_CODE (trueop) == CONST_INT
472 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
474 HOST_WIDE_INT arg0 = INTVAL (trueop);
475 HOST_WIDE_INT val;
477 switch (code)
479 case NOT:
480 val = ~ arg0;
481 break;
483 case NEG:
484 val = - arg0;
485 break;
487 case ABS:
488 val = (arg0 >= 0 ? arg0 : - arg0);
489 break;
491 case FFS:
492 /* Don't use ffs here. Instead, get low order bit and then its
493 number. If arg0 is zero, this will return 0, as desired. */
494 arg0 &= GET_MODE_MASK (mode);
495 val = exact_log2 (arg0 & (- arg0)) + 1;
496 break;
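/* Worked example for the bit trick above (illustrative, not part of
   the original source): for arg0 = 12 (binary 1100), arg0 & -arg0
   isolates the lowest set bit, 4 (binary 0100); exact_log2 (4) is 2,
   so FFS yields 2 + 1 = 3, the 1-based position of that bit.  For
   arg0 = 0 the AND is 0, exact_log2 (0) is -1, and the result is 0
   as promised.  */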
498 case CLZ:
499 arg0 &= GET_MODE_MASK (mode);
500 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
 501         ;
 502       else
503 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
504 break;
506 case CTZ:
507 arg0 &= GET_MODE_MASK (mode);
508 if (arg0 == 0)
510 /* Even if the value at zero is undefined, we have to come
511 up with some replacement. Seems good enough. */
512 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
513 val = GET_MODE_BITSIZE (mode);
515 else
516 val = exact_log2 (arg0 & -arg0);
517 break;
519 case POPCOUNT:
520 arg0 &= GET_MODE_MASK (mode);
521 val = 0;
522 while (arg0)
523 val++, arg0 &= arg0 - 1;
524 break;
526 case PARITY:
527 arg0 &= GET_MODE_MASK (mode);
528 val = 0;
529 while (arg0)
530 val++, arg0 &= arg0 - 1;
531 val &= 1;
532 break;
534 case TRUNCATE:
535 val = arg0;
536 break;
538 case ZERO_EXTEND:
539 /* When zero-extending a CONST_INT, we need to know its
540 original mode. */
541 gcc_assert (op_mode != VOIDmode);
542 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
544 /* If we were really extending the mode,
545 we would have to distinguish between zero-extension
546 and sign-extension. */
547 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
548 val = arg0;
550 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
551 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
552 else
553 return 0;
554 break;
556 case SIGN_EXTEND:
557 if (op_mode == VOIDmode)
558 op_mode = mode;
559 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
561 /* If we were really extending the mode,
562 we would have to distinguish between zero-extension
563 and sign-extension. */
564 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
565 val = arg0;
567 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
 569         val
 570           = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
571 if (val
572 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
573 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
575 else
576 return 0;
577 break;
579 case SQRT:
580 case FLOAT_EXTEND:
581 case FLOAT_TRUNCATE:
582 case SS_TRUNCATE:
583 case US_TRUNCATE:
584 return 0;
586 default:
587 gcc_unreachable ();
590 val = trunc_int_for_mode (val, mode);
592 return GEN_INT (val);
595 /* We can do some operations on integer CONST_DOUBLEs. Also allow
596 for a DImode operation on a CONST_INT. */
597 else if (GET_MODE (trueop) == VOIDmode
598 && width <= HOST_BITS_PER_WIDE_INT * 2
599 && (GET_CODE (trueop) == CONST_DOUBLE
600 || GET_CODE (trueop) == CONST_INT))
602 unsigned HOST_WIDE_INT l1, lv;
603 HOST_WIDE_INT h1, hv;
605 if (GET_CODE (trueop) == CONST_DOUBLE)
606 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
607 else
608 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
610 switch (code)
612 case NOT:
613 lv = ~ l1;
614 hv = ~ h1;
615 break;
617 case NEG:
618 neg_double (l1, h1, &lv, &hv);
619 break;
621 case ABS:
622 if (h1 < 0)
623 neg_double (l1, h1, &lv, &hv);
624 else
625 lv = l1, hv = h1;
626 break;
628 case FFS:
629 hv = 0;
630 if (l1 == 0)
632 if (h1 == 0)
633 lv = 0;
634 else
635 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
637 else
638 lv = exact_log2 (l1 & -l1) + 1;
639 break;
641 case CLZ:
642 hv = 0;
643 if (h1 != 0)
644 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
645 - HOST_BITS_PER_WIDE_INT;
646 else if (l1 != 0)
647 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
648 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
649 lv = GET_MODE_BITSIZE (mode);
650 break;
652 case CTZ:
653 hv = 0;
654 if (l1 != 0)
655 lv = exact_log2 (l1 & -l1);
656 else if (h1 != 0)
657 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
658 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
659 lv = GET_MODE_BITSIZE (mode);
660 break;
662 case POPCOUNT:
663 hv = 0;
664 lv = 0;
665 while (l1)
666 lv++, l1 &= l1 - 1;
667 while (h1)
668 lv++, h1 &= h1 - 1;
669 break;
671 case PARITY:
672 hv = 0;
673 lv = 0;
674 while (l1)
675 lv++, l1 &= l1 - 1;
676 while (h1)
677 lv++, h1 &= h1 - 1;
678 lv &= 1;
679 break;
681 case TRUNCATE:
682 /* This is just a change-of-mode, so do nothing. */
683 lv = l1, hv = h1;
684 break;
686 case ZERO_EXTEND:
687 gcc_assert (op_mode != VOIDmode);
689 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
690 return 0;
692 hv = 0;
693 lv = l1 & GET_MODE_MASK (op_mode);
694 break;
696 case SIGN_EXTEND:
697 if (op_mode == VOIDmode
698 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
699 return 0;
700 else
702 lv = l1 & GET_MODE_MASK (op_mode);
703 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
704 && (lv & ((HOST_WIDE_INT) 1
705 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
706 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
708 hv = HWI_SIGN_EXTEND (lv);
710 break;
712 case SQRT:
713 return 0;
715 default:
716 return 0;
719 return immed_double_const (lv, hv, mode);
722 else if (GET_CODE (trueop) == CONST_DOUBLE
723 && GET_MODE_CLASS (mode) == MODE_FLOAT)
725 REAL_VALUE_TYPE d, t;
726 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
728 switch (code)
730 case SQRT:
731 if (HONOR_SNANS (mode) && real_isnan (&d))
732 return 0;
733 real_sqrt (&t, mode, &d);
734 d = t;
735 break;
736 case ABS:
737 d = REAL_VALUE_ABS (d);
738 break;
739 case NEG:
740 d = REAL_VALUE_NEGATE (d);
741 break;
742 case FLOAT_TRUNCATE:
743 d = real_value_truncate (mode, d);
744 break;
745 case FLOAT_EXTEND:
746 /* All this does is change the mode. */
747 break;
748 case FIX:
749 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
750 break;
751 case NOT:
753 long tmp[4];
754 int i;
756 real_to_target (tmp, &d, GET_MODE (trueop));
757 for (i = 0; i < 4; i++)
758 tmp[i] = ~tmp[i];
759 real_from_target (&d, tmp, mode);
761 default:
762 gcc_unreachable ();
764 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
767 else if (GET_CODE (trueop) == CONST_DOUBLE
768 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
769 && GET_MODE_CLASS (mode) == MODE_INT
770 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
772 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
773 operators are intentionally left unspecified (to ease implementation
774 by target backends), for consistency, this routine implements the
775 same semantics for constant folding as used by the middle-end. */
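/* Worked example (illustrative, not part of the original source):
   folding (fix:SI (const_double:DF 3.0e10)).  3e10 exceeds the SImode
   upper bound 2147483647, so the REAL_VALUES_LESS (t, x) test below
   fires and the result saturates to 0x7fffffff; symmetrically, -3e10
   saturates to the most negative value 0x80000000, and a NaN operand
   folds to 0, matching the middle-end's handling of out-of-range
   float-to-int conversions.  */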
777 HOST_WIDE_INT xh, xl, th, tl;
778 REAL_VALUE_TYPE x, t;
779 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
780 switch (code)
782 case FIX:
783 if (REAL_VALUE_ISNAN (x))
784 return const0_rtx;
786 /* Test against the signed upper bound. */
787 if (width > HOST_BITS_PER_WIDE_INT)
789 th = ((unsigned HOST_WIDE_INT) 1
790 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
791 tl = -1;
793 else
795 th = 0;
796 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
798 real_from_integer (&t, VOIDmode, tl, th, 0);
799 if (REAL_VALUES_LESS (t, x))
801 xh = th;
802 xl = tl;
803 break;
806 /* Test against the signed lower bound. */
807 if (width > HOST_BITS_PER_WIDE_INT)
809 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
810 tl = 0;
812 else
814 th = -1;
815 tl = (HOST_WIDE_INT) -1 << (width - 1);
817 real_from_integer (&t, VOIDmode, tl, th, 0);
818 if (REAL_VALUES_LESS (x, t))
820 xh = th;
821 xl = tl;
822 break;
824 REAL_VALUE_TO_INT (&xl, &xh, x);
825 break;
827 case UNSIGNED_FIX:
828 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
829 return const0_rtx;
831 /* Test against the unsigned upper bound. */
832 if (width == 2*HOST_BITS_PER_WIDE_INT)
834 th = -1;
835 tl = -1;
837 else if (width >= HOST_BITS_PER_WIDE_INT)
839 th = ((unsigned HOST_WIDE_INT) 1
840 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
841 tl = -1;
843 else
845 th = 0;
846 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
848 real_from_integer (&t, VOIDmode, tl, th, 1);
849 if (REAL_VALUES_LESS (t, x))
851 xh = th;
852 xl = tl;
853 break;
856 REAL_VALUE_TO_INT (&xl, &xh, x);
857 break;
859 default:
860 gcc_unreachable ();
862 return immed_double_const (xl, xh, mode);
865 /* This was formerly used only for non-IEEE float.
866 eggert@twinsun.com says it is safe for IEEE also. */
867 else
869 enum rtx_code reversed;
870 rtx temp;
872 /* There are some simplifications we can do even if the operands
873 aren't constant. */
874 switch (code)
876 case NOT:
877 /* (not (not X)) == X. */
878 if (GET_CODE (op) == NOT)
879 return XEXP (op, 0);
881 /* (not (eq X Y)) == (ne X Y), etc. */
882 if (COMPARISON_P (op)
883 && (mode == BImode || STORE_FLAG_VALUE == -1)
884 && ((reversed = reversed_comparison_code (op, NULL_RTX))
885 != UNKNOWN))
886 return simplify_gen_relational (reversed, mode, VOIDmode,
887 XEXP (op, 0), XEXP (op, 1));
889 /* (not (plus X -1)) can become (neg X). */
890 if (GET_CODE (op) == PLUS
891 && XEXP (op, 1) == constm1_rtx)
892 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
894 /* Similarly, (not (neg X)) is (plus X -1). */
895 if (GET_CODE (op) == NEG)
896 return plus_constant (XEXP (op, 0), -1);
898 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
899 if (GET_CODE (op) == XOR
900 && GET_CODE (XEXP (op, 1)) == CONST_INT
901 && (temp = simplify_unary_operation (NOT, mode,
902 XEXP (op, 1),
903 mode)) != 0)
904 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
906 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
907 if (GET_CODE (op) == PLUS
908 && GET_CODE (XEXP (op, 1)) == CONST_INT
909 && mode_signbit_p (mode, XEXP (op, 1))
910 && (temp = simplify_unary_operation (NOT, mode,
911 XEXP (op, 1),
912 mode)) != 0)
913 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
917 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
918 operands other than 1, but that is not valid. We could do a
919 similar simplification for (not (lshiftrt C X)) where C is
920 just the sign bit, but this doesn't seem common enough to
921 bother with. */
922 if (GET_CODE (op) == ASHIFT
923 && XEXP (op, 0) == const1_rtx)
925 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
926 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
929 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
930 by reversing the comparison code if valid. */
931 if (STORE_FLAG_VALUE == -1
932 && COMPARISON_P (op)
933 && (reversed = reversed_comparison_code (op, NULL_RTX))
934 != UNKNOWN)
935 return simplify_gen_relational (reversed, mode, VOIDmode,
936 XEXP (op, 0), XEXP (op, 1));
938 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
939 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
940 so we can perform the above simplification. */
942 if (STORE_FLAG_VALUE == -1
943 && GET_CODE (op) == ASHIFTRT
944 && GET_CODE (XEXP (op, 1)) == CONST_INT
945 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
946 return simplify_gen_relational (GE, mode, VOIDmode,
947 XEXP (op, 0), const0_rtx);
949 break;
951 case NEG:
952 /* (neg (neg X)) == X. */
953 if (GET_CODE (op) == NEG)
954 return XEXP (op, 0);
956 /* (neg (plus X 1)) can become (not X). */
957 if (GET_CODE (op) == PLUS
958 && XEXP (op, 1) == const1_rtx)
959 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
961 /* Similarly, (neg (not X)) is (plus X 1). */
962 if (GET_CODE (op) == NOT)
963 return plus_constant (XEXP (op, 0), 1);
965 /* (neg (minus X Y)) can become (minus Y X). This transformation
966 isn't safe for modes with signed zeros, since if X and Y are
967 both +0, (minus Y X) is the same as (minus X Y). If the
968 rounding mode is towards +infinity (or -infinity) then the two
969 expressions will be rounded differently. */
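/* Concrete case behind the caveat above (illustrative): with
   X = Y = +0.0, (minus X Y) is +0.0, so (neg (minus X Y)) is -0.0,
   while (minus Y X) is +0.0.  Likewise, under directed rounding
   X - Y and -(Y - X) can round to results that differ in the last
   place, hence both HONOR_* checks below.  */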
970 if (GET_CODE (op) == MINUS
971 && !HONOR_SIGNED_ZEROS (mode)
972 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
973 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
974 XEXP (op, 0));
976 if (GET_CODE (op) == PLUS
977 && !HONOR_SIGNED_ZEROS (mode)
978 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
980 /* (neg (plus A C)) is simplified to (minus -C A). */
981 if (GET_CODE (XEXP (op, 1)) == CONST_INT
982 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
984 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
985 mode);
986 if (temp)
987 return simplify_gen_binary (MINUS, mode, temp,
988 XEXP (op, 0));
991 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
992 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
993 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
996 /* (neg (mult A B)) becomes (mult (neg A) B).
997 This works even for floating-point values. */
998 if (GET_CODE (op) == MULT
999 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1001 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1002 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1005 /* NEG commutes with ASHIFT since it is multiplication. Only do
1006 this if we can then eliminate the NEG (e.g., if the operand
1007 is a constant). */
1008 if (GET_CODE (op) == ASHIFT)
1010 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1011 mode);
1012 if (temp)
1013 return simplify_gen_binary (ASHIFT, mode, temp,
1014 XEXP (op, 1));
1017 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1018 C is equal to the width of MODE minus 1. */
1019 if (GET_CODE (op) == ASHIFTRT
1020 && GET_CODE (XEXP (op, 1)) == CONST_INT
1021 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1022 return simplify_gen_binary (LSHIFTRT, mode,
1023 XEXP (op, 0), XEXP (op, 1));
1025 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1026 C is equal to the width of MODE minus 1. */
1027 if (GET_CODE (op) == LSHIFTRT
1028 && GET_CODE (XEXP (op, 1)) == CONST_INT
1029 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1030 return simplify_gen_binary (ASHIFTRT, mode,
1031 XEXP (op, 0), XEXP (op, 1));
1033 break;
1035 case SIGN_EXTEND:
1036 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1037 becomes just the MINUS if its mode is MODE. This allows
1038 folding switch statements on machines using casesi (such as
1039 the VAX). */
1040 if (GET_CODE (op) == TRUNCATE
1041 && GET_MODE (XEXP (op, 0)) == mode
1042 && GET_CODE (XEXP (op, 0)) == MINUS
1043 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1044 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1045 return XEXP (op, 0);
1047 /* Check for a sign extension of a subreg of a promoted
1048 variable, where the promotion is sign-extended, and the
1049 target mode is the same as the variable's promotion. */
1050 if (GET_CODE (op) == SUBREG
1051 && SUBREG_PROMOTED_VAR_P (op)
1052 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1053 && GET_MODE (XEXP (op, 0)) == mode)
1054 return XEXP (op, 0);
1056 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1057 if (! POINTERS_EXTEND_UNSIGNED
1058 && mode == Pmode && GET_MODE (op) == ptr_mode
1059 && (CONSTANT_P (op)
1060 || (GET_CODE (op) == SUBREG
1061 && REG_P (SUBREG_REG (op))
1062 && REG_POINTER (SUBREG_REG (op))
1063 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1064 return convert_memory_address (Pmode, op);
1065 #endif
1066 break;
1068 case ZERO_EXTEND:
1069 /* Check for a zero extension of a subreg of a promoted
1070 variable, where the promotion is zero-extended, and the
1071 target mode is the same as the variable's promotion. */
1072 if (GET_CODE (op) == SUBREG
1073 && SUBREG_PROMOTED_VAR_P (op)
1074 && SUBREG_PROMOTED_UNSIGNED_P (op)
1075 && GET_MODE (XEXP (op, 0)) == mode)
1076 return XEXP (op, 0);
1078 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1079 if (POINTERS_EXTEND_UNSIGNED > 0
1080 && mode == Pmode && GET_MODE (op) == ptr_mode
1081 && (CONSTANT_P (op)
1082 || (GET_CODE (op) == SUBREG
1083 && REG_P (SUBREG_REG (op))
1084 && REG_POINTER (SUBREG_REG (op))
1085 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1086 return convert_memory_address (Pmode, op);
1087 #endif
1088 break;
1090 default:
1091 break;
1094 return 0;
1098 /* Subroutine of simplify_binary_operation to simplify a commutative,
1099 associative binary operation CODE with result mode MODE, operating
1100 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1101 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1102 canonicalization is possible. */
1104 static rtx
1105 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1106 rtx op0, rtx op1)
1108 rtx tem;
1110 /* Linearize the operator to the left. */
1111 if (GET_CODE (op1) == code)
1113 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1114 if (GET_CODE (op0) == code)
1116 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1117 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1120 /* "a op (b op c)" becomes "(b op c) op a". */
1121 if (! swap_commutative_operands_p (op1, op0))
1122 return simplify_gen_binary (code, mode, op1, op0);
1124 tem = op0;
1125 op0 = op1;
1126 op1 = tem;
1129 if (GET_CODE (op0) == code)
1131 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1132 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1134 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1135 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1138 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1139 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1140 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1141 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1142 if (tem != 0)
1143 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1145 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1146 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1147 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1148 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1149 if (tem != 0)
1150 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1153 return 0;
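/* Illustrative summary (not part of the original source) of the
   reassociation performed above, written with PLUS as the operation:

     (plus a (plus b c))            -> (plus (plus b c) a)
     (plus (plus a b) (plus c d))   -> (plus (plus (plus a b) c) d)
     (plus (plus x c) y)            -> (plus (plus x y) c)
     (plus (plus a b) c)            -> (plus a (plus b c))
                                       only if (plus b c) itself simplifies
     (plus (plus a b) c)            -> (plus (plus a c) b)
                                       only if (plus a c) itself simplifies

   The first three rules depend on swap_commutative_operands_p, which
   keeps constants and other low-precedence operands to the right, so
   the exact outcome varies with the operands involved.  */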
1156 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1157 and OP1. Return 0 if no simplification is possible.
1159 Don't use this for relational operations such as EQ or LT.
1160 Use simplify_relational_operation instead. */
1162 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1163 rtx op0, rtx op1)
1165 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1166 HOST_WIDE_INT val;
1167 unsigned int width = GET_MODE_BITSIZE (mode);
1168 rtx trueop0, trueop1;
1169 rtx tem;
1171 /* Relational operations don't work here. We must know the mode
1172 of the operands in order to do the comparison correctly.
1173 Assuming a full word can give incorrect results.
1174 Consider comparing 128 with -128 in QImode. */
1175 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1176 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1178 /* Make sure the constant is second. */
1179 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1180 && swap_commutative_operands_p (op0, op1))
1182 tem = op0, op0 = op1, op1 = tem;
1185 trueop0 = avoid_constant_pool_reference (op0);
1186 trueop1 = avoid_constant_pool_reference (op1);
1188 if (VECTOR_MODE_P (mode)
1189 && code != VEC_CONCAT
1190 && GET_CODE (trueop0) == CONST_VECTOR
1191 && GET_CODE (trueop1) == CONST_VECTOR)
1193 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1194 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1195 enum machine_mode op0mode = GET_MODE (trueop0);
1196 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1197 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1198 enum machine_mode op1mode = GET_MODE (trueop1);
1199 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1200 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1201 rtvec v = rtvec_alloc (n_elts);
1202 unsigned int i;
1204 gcc_assert (op0_n_elts == n_elts);
1205 gcc_assert (op1_n_elts == n_elts);
1206 for (i = 0; i < n_elts; i++)
1208 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1209 CONST_VECTOR_ELT (trueop0, i),
1210 CONST_VECTOR_ELT (trueop1, i));
1211 if (!x)
1212 return 0;
1213 RTVEC_ELT (v, i) = x;
1216 return gen_rtx_CONST_VECTOR (mode, v);
1219 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1220 && GET_CODE (trueop0) == CONST_DOUBLE
1221 && GET_CODE (trueop1) == CONST_DOUBLE
1222 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1224 if (code == AND
1225 || code == IOR
1226 || code == XOR)
1228 long tmp0[4];
1229 long tmp1[4];
1230 REAL_VALUE_TYPE r;
1231 int i;
1233 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1234 GET_MODE (op0));
1235 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1236 GET_MODE (op1));
1237 for (i = 0; i < 4; i++)
1239 switch (code)
1241 case AND:
1242 tmp0[i] &= tmp1[i];
1243 break;
1244 case IOR:
1245 tmp0[i] |= tmp1[i];
1246 break;
1247 case XOR:
1248 tmp0[i] ^= tmp1[i];
1249 break;
1250 default:
1251 gcc_unreachable ();
1254 real_from_target (&r, tmp0, mode);
1255 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1257 else
1259 REAL_VALUE_TYPE f0, f1, value;
1261 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1262 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1263 f0 = real_value_truncate (mode, f0);
1264 f1 = real_value_truncate (mode, f1);
1266 if (HONOR_SNANS (mode)
1267 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1268 return 0;
1270 if (code == DIV
1271 && REAL_VALUES_EQUAL (f1, dconst0)
1272 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1273 return 0;
1275 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1276 && flag_trapping_math
1277 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1279 int s0 = REAL_VALUE_NEGATIVE (f0);
1280 int s1 = REAL_VALUE_NEGATIVE (f1);
1282 switch (code)
1284 case PLUS:
1285 /* Inf + -Inf = NaN plus exception. */
1286 if (s0 != s1)
1287 return 0;
1288 break;
1289 case MINUS:
1290 /* Inf - Inf = NaN plus exception. */
1291 if (s0 == s1)
1292 return 0;
1293 break;
1294 case DIV:
1295 /* Inf / Inf = NaN plus exception. */
1296 return 0;
1297 default:
1298 break;
1302 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1303 && flag_trapping_math
1304 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1305 || (REAL_VALUE_ISINF (f1)
1306 && REAL_VALUES_EQUAL (f0, dconst0))))
1307 /* Inf * 0 = NaN plus exception. */
1308 return 0;
1310 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1312 value = real_value_truncate (mode, value);
1313 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1317 /* We can fold some multi-word operations. */
1318 if (GET_MODE_CLASS (mode) == MODE_INT
1319 && width == HOST_BITS_PER_WIDE_INT * 2
1320 && (GET_CODE (trueop0) == CONST_DOUBLE
1321 || GET_CODE (trueop0) == CONST_INT)
1322 && (GET_CODE (trueop1) == CONST_DOUBLE
1323 || GET_CODE (trueop1) == CONST_INT))
1325 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1326 HOST_WIDE_INT h1, h2, hv, ht;
1328 if (GET_CODE (trueop0) == CONST_DOUBLE)
1329 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1330 else
1331 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1333 if (GET_CODE (trueop1) == CONST_DOUBLE)
1334 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1335 else
1336 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1338 switch (code)
1340 case MINUS:
1341 /* A - B == A + (-B). */
1342 neg_double (l2, h2, &lv, &hv);
1343 l2 = lv, h2 = hv;
1345 /* Fall through.... */
1347 case PLUS:
1348 add_double (l1, h1, l2, h2, &lv, &hv);
1349 break;
1351 case MULT:
1352 mul_double (l1, h1, l2, h2, &lv, &hv);
1353 break;
1355 case DIV:
1356 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1357 &lv, &hv, &lt, &ht))
1358 return 0;
1359 break;
1361 case MOD:
1362 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1363 &lt, &ht, &lv, &hv))
1364 return 0;
1365 break;
1367 case UDIV:
1368 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1369 &lv, &hv, &lt, &ht))
1370 return 0;
1371 break;
1373 case UMOD:
1374 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1375 &lt, &ht, &lv, &hv))
1376 return 0;
1377 break;
1379 case AND:
1380 lv = l1 & l2, hv = h1 & h2;
1381 break;
1383 case IOR:
1384 lv = l1 | l2, hv = h1 | h2;
1385 break;
1387 case XOR:
1388 lv = l1 ^ l2, hv = h1 ^ h2;
1389 break;
1391 case SMIN:
1392 if (h1 < h2
1393 || (h1 == h2
1394 && ((unsigned HOST_WIDE_INT) l1
1395 < (unsigned HOST_WIDE_INT) l2)))
1396 lv = l1, hv = h1;
1397 else
1398 lv = l2, hv = h2;
1399 break;
1401 case SMAX:
1402 if (h1 > h2
1403 || (h1 == h2
1404 && ((unsigned HOST_WIDE_INT) l1
1405 > (unsigned HOST_WIDE_INT) l2)))
1406 lv = l1, hv = h1;
1407 else
1408 lv = l2, hv = h2;
1409 break;
1411 case UMIN:
1412 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1413 || (h1 == h2
1414 && ((unsigned HOST_WIDE_INT) l1
1415 < (unsigned HOST_WIDE_INT) l2)))
1416 lv = l1, hv = h1;
1417 else
1418 lv = l2, hv = h2;
1419 break;
1421 case UMAX:
1422 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1423 || (h1 == h2
1424 && ((unsigned HOST_WIDE_INT) l1
1425 > (unsigned HOST_WIDE_INT) l2)))
1426 lv = l1, hv = h1;
1427 else
1428 lv = l2, hv = h2;
1429 break;
1431 case LSHIFTRT: case ASHIFTRT:
1432 case ASHIFT:
1433 case ROTATE: case ROTATERT:
1434 if (SHIFT_COUNT_TRUNCATED)
1435 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1437 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1438 return 0;
1440 if (code == LSHIFTRT || code == ASHIFTRT)
1441 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1442 code == ASHIFTRT);
1443 else if (code == ASHIFT)
1444 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1445 else if (code == ROTATE)
1446 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1447 else /* code == ROTATERT */
1448 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1449 break;
1451 default:
1452 return 0;
1455 return immed_double_const (lv, hv, mode);
1458 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1459 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1461 /* Even if we can't compute a constant result,
1462 there are some cases worth simplifying. */
1464 switch (code)
1466 case PLUS:
1467 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1468 when x is NaN, infinite, or finite and nonzero. They aren't
1469 when x is -0 and the rounding mode is not towards -infinity,
1470 since (-0) + 0 is then 0. */
1471 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1472 return op0;
1474 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1475 transformations are safe even for IEEE. */
1476 if (GET_CODE (op0) == NEG)
1477 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1478 else if (GET_CODE (op1) == NEG)
1479 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1481 /* (~a) + 1 -> -a */
1482 if (INTEGRAL_MODE_P (mode)
1483 && GET_CODE (op0) == NOT
1484 && trueop1 == const1_rtx)
1485 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1487 /* Handle both-operands-constant cases. We can only add
1488 CONST_INTs to constants since the sum of relocatable symbols
1489 can't be handled by most assemblers. Don't add CONST_INT
1490 to CONST_INT since overflow won't be computed properly if wider
1491 than HOST_BITS_PER_WIDE_INT. */
1493 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1494 && GET_CODE (op1) == CONST_INT)
1495 return plus_constant (op0, INTVAL (op1));
1496 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1497 && GET_CODE (op0) == CONST_INT)
1498 return plus_constant (op1, INTVAL (op0));
1500 /* See if this is something like X * C - X or vice versa or
1501 if the multiplication is written as a shift. If so, we can
1502 distribute and make a new multiply, shift, or maybe just
1503 have X (if C is 2 in the example above). But don't make
1504 something more expensive than we had before. */
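/* Worked example (illustrative, not part of the original source):
   for (plus (mult X 3) X) the two operands reduce to X with
   coefficients 3 and 1, so the code below rebuilds the sum as
   (mult X 4); for (plus (ashift X 2) (neg X)) the coefficients are
   4 and -1, giving (mult X 3).  The rtx_cost comparison then keeps
   the original form if the rewrite would be more expensive.  */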
1506 if (! FLOAT_MODE_P (mode))
1508 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1509 rtx lhs = op0, rhs = op1;
1511 if (GET_CODE (lhs) == NEG)
1512 coeff0 = -1, lhs = XEXP (lhs, 0);
1513 else if (GET_CODE (lhs) == MULT
1514 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1516 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1518 else if (GET_CODE (lhs) == ASHIFT
1519 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1520 && INTVAL (XEXP (lhs, 1)) >= 0
1521 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1523 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1524 lhs = XEXP (lhs, 0);
1527 if (GET_CODE (rhs) == NEG)
1528 coeff1 = -1, rhs = XEXP (rhs, 0);
1529 else if (GET_CODE (rhs) == MULT
1530 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1532 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1534 else if (GET_CODE (rhs) == ASHIFT
1535 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1536 && INTVAL (XEXP (rhs, 1)) >= 0
1537 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1539 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1540 rhs = XEXP (rhs, 0);
1543 if (rtx_equal_p (lhs, rhs))
1545 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1546 tem = simplify_gen_binary (MULT, mode, lhs,
1547 GEN_INT (coeff0 + coeff1));
1548 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1549 ? tem : 0;
1553 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1554 if ((GET_CODE (op1) == CONST_INT
1555 || GET_CODE (op1) == CONST_DOUBLE)
1556 && GET_CODE (op0) == XOR
1557 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1558 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1559 && mode_signbit_p (mode, op1))
1560 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1561 simplify_gen_binary (XOR, mode, op1,
1562 XEXP (op0, 1)));
1564 /* If one of the operands is a PLUS or a MINUS, see if we can
1565 simplify this by the associative law.
1566 Don't use the associative law for floating point.
1567 The inaccuracy makes it nonassociative,
1568 and subtle programs can break if operations are associated. */
1570 if (INTEGRAL_MODE_P (mode)
1571 && (plus_minus_operand_p (op0)
1572 || plus_minus_operand_p (op1))
1573 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1574 return tem;
1576 /* Reassociate floating point addition only when the user
1577 specifies unsafe math optimizations. */
1578 if (FLOAT_MODE_P (mode)
1579 && flag_unsafe_math_optimizations)
1581 tem = simplify_associative_operation (code, mode, op0, op1);
1582 if (tem)
1583 return tem;
1585 break;
1587 case COMPARE:
1588 #ifdef HAVE_cc0
1589 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1590 using cc0, in which case we want to leave it as a COMPARE
1591 so we can distinguish it from a register-register-copy.
1593 In IEEE floating point, x-0 is not the same as x. */
1595 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1596 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1597 && trueop1 == CONST0_RTX (mode))
1598 return op0;
1599 #endif
1601 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1602 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1603 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1604 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1606 rtx xop00 = XEXP (op0, 0);
1607 rtx xop10 = XEXP (op1, 0);
1609 #ifdef HAVE_cc0
1610 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1611 #else
1612 if (REG_P (xop00) && REG_P (xop10)
1613 && GET_MODE (xop00) == GET_MODE (xop10)
1614 && REGNO (xop00) == REGNO (xop10)
1615 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1616 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1617 #endif
1618 return xop00;
1620 break;
1622 case MINUS:
1623 /* We can't assume x-x is 0 even with non-IEEE floating point,
1624 but since it is zero except in very strange circumstances, we
1625 will treat it as zero with -funsafe-math-optimizations. */
1626 if (rtx_equal_p (trueop0, trueop1)
1627 && ! side_effects_p (op0)
1628 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1629 return CONST0_RTX (mode);
1631 /* Change subtraction from zero into negation. (0 - x) is the
1632 same as -x when x is NaN, infinite, or finite and nonzero.
1633 But if the mode has signed zeros, and does not round towards
1634 -infinity, then 0 - 0 is 0, not -0. */
1635 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1636 return simplify_gen_unary (NEG, mode, op1, mode);
1638 /* (-1 - a) is ~a. */
1639 if (trueop0 == constm1_rtx)
1640 return simplify_gen_unary (NOT, mode, op1, mode);
1642 /* Subtracting 0 has no effect unless the mode has signed zeros
1643 and supports rounding towards -infinity. In such a case,
1644 0 - 0 is -0. */
1645 if (!(HONOR_SIGNED_ZEROS (mode)
1646 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1647 && trueop1 == CONST0_RTX (mode))
1648 return op0;
1650 /* See if this is something like X * C - X or vice versa or
1651 if the multiplication is written as a shift. If so, we can
1652 distribute and make a new multiply, shift, or maybe just
1653 have X (if C is 2 in the example above). But don't make
1654 something more expensive than we had before. */
1656 if (! FLOAT_MODE_P (mode))
1658 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1659 rtx lhs = op0, rhs = op1;
1661 if (GET_CODE (lhs) == NEG)
1662 coeff0 = -1, lhs = XEXP (lhs, 0);
1663 else if (GET_CODE (lhs) == MULT
1664 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1666 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1668 else if (GET_CODE (lhs) == ASHIFT
1669 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1670 && INTVAL (XEXP (lhs, 1)) >= 0
1671 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1673 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1674 lhs = XEXP (lhs, 0);
1677 if (GET_CODE (rhs) == NEG)
1678 coeff1 = - 1, rhs = XEXP (rhs, 0);
1679 else if (GET_CODE (rhs) == MULT
1680 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1682 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1684 else if (GET_CODE (rhs) == ASHIFT
1685 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1686 && INTVAL (XEXP (rhs, 1)) >= 0
1687 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1689 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1690 rhs = XEXP (rhs, 0);
1693 if (rtx_equal_p (lhs, rhs))
1695 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1696 tem = simplify_gen_binary (MULT, mode, lhs,
1697 GEN_INT (coeff0 - coeff1));
1698 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1699 ? tem : 0;
1703 /* (a - (-b)) -> (a + b). True even for IEEE. */
1704 if (GET_CODE (op1) == NEG)
1705 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1707 /* (-x - c) may be simplified as (-c - x). */
1708 if (GET_CODE (op0) == NEG
1709 && (GET_CODE (op1) == CONST_INT
1710 || GET_CODE (op1) == CONST_DOUBLE))
1712 tem = simplify_unary_operation (NEG, mode, op1, mode);
1713 if (tem)
1714 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1717 /* If one of the operands is a PLUS or a MINUS, see if we can
1718 simplify this by the associative law.
1719 Don't use the associative law for floating point.
1720 The inaccuracy makes it nonassociative,
1721 and subtle programs can break if operations are associated. */
1723 if (INTEGRAL_MODE_P (mode)
1724 && (plus_minus_operand_p (op0)
1725 || plus_minus_operand_p (op1))
1726 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1727 return tem;
1729 /* Don't let a relocatable value get a negative coeff. */
1730 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1731 return simplify_gen_binary (PLUS, mode,
1732 op0,
1733 neg_const_int (mode, op1));
1735 /* (x - (x & y)) -> (x & ~y) */
1736 if (GET_CODE (op1) == AND)
1738 if (rtx_equal_p (op0, XEXP (op1, 0)))
1740 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1741 GET_MODE (XEXP (op1, 1)));
1742 return simplify_gen_binary (AND, mode, op0, tem);
1744 if (rtx_equal_p (op0, XEXP (op1, 1)))
1746 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1747 GET_MODE (XEXP (op1, 0)));
1748 return simplify_gen_binary (AND, mode, op0, tem);
1751 break;
1753 case MULT:
1754 if (trueop1 == constm1_rtx)
1755 return simplify_gen_unary (NEG, mode, op0, mode);
1757 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1758 x is NaN, since x * 0 is then also NaN. Nor is it valid
1759 when the mode has signed zeros, since multiplying a negative
1760 number by 0 will give -0, not 0. */
1761 if (!HONOR_NANS (mode)
1762 && !HONOR_SIGNED_ZEROS (mode)
1763 && trueop1 == CONST0_RTX (mode)
1764 && ! side_effects_p (op0))
1765 return op1;
1767 /* In IEEE floating point, x*1 is not equivalent to x for
1768 signalling NaNs. */
1769 if (!HONOR_SNANS (mode)
1770 && trueop1 == CONST1_RTX (mode))
1771 return op0;
1773 /* Convert multiply by constant power of two into shift unless
1774 we are still generating RTL. This test is a kludge. */
1775 if (GET_CODE (trueop1) == CONST_INT
1776 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1777 /* If the mode is larger than the host word size, and the
1778 uppermost bit is set, then this isn't a power of two due
1779 to implicit sign extension. */
1780 && (width <= HOST_BITS_PER_WIDE_INT
1781 || val != HOST_BITS_PER_WIDE_INT - 1))
1782 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1784 /* x*2 is x+x and x*(-1) is -x */
1785 if (GET_CODE (trueop1) == CONST_DOUBLE
1786 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1787 && GET_MODE (op0) == mode)
1789 REAL_VALUE_TYPE d;
1790 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1792 if (REAL_VALUES_EQUAL (d, dconst2))
1793 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1795 if (REAL_VALUES_EQUAL (d, dconstm1))
1796 return simplify_gen_unary (NEG, mode, op0, mode);
1799 /* Reassociate multiplication, but for floating point MULTs
1800 only when the user specifies unsafe math optimizations. */
1801 if (! FLOAT_MODE_P (mode)
1802 || flag_unsafe_math_optimizations)
1804 tem = simplify_associative_operation (code, mode, op0, op1);
1805 if (tem)
1806 return tem;
1808 break;
1810 case IOR:
1811 if (trueop1 == const0_rtx)
1812 return op0;
1813 if (GET_CODE (trueop1) == CONST_INT
1814 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1815 == GET_MODE_MASK (mode)))
1816 return op1;
1817 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1818 return op0;
1819 /* A | (~A) -> -1 */
1820 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1821 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1822 && ! side_effects_p (op0)
1823 && GET_MODE_CLASS (mode) != MODE_CC)
1824 return constm1_rtx;
1825 tem = simplify_associative_operation (code, mode, op0, op1);
1826 if (tem)
1827 return tem;
1828 break;
1830 case XOR:
1831 if (trueop1 == const0_rtx)
1832 return op0;
1833 if (GET_CODE (trueop1) == CONST_INT
1834 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1835 == GET_MODE_MASK (mode)))
1836 return simplify_gen_unary (NOT, mode, op0, mode);
1837 if (trueop0 == trueop1
1838 && ! side_effects_p (op0)
1839 && GET_MODE_CLASS (mode) != MODE_CC)
1840 return const0_rtx;
1842 /* Canonicalize XOR of the most significant bit to PLUS. */
1843 if ((GET_CODE (op1) == CONST_INT
1844 || GET_CODE (op1) == CONST_DOUBLE)
1845 && mode_signbit_p (mode, op1))
1846 return simplify_gen_binary (PLUS, mode, op0, op1);
1847 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1848 if ((GET_CODE (op1) == CONST_INT
1849 || GET_CODE (op1) == CONST_DOUBLE)
1850 && GET_CODE (op0) == PLUS
1851 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1852 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1853 && mode_signbit_p (mode, XEXP (op0, 1)))
1854 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1855 simplify_gen_binary (XOR, mode, op1,
1856 XEXP (op0, 1)));
1858 tem = simplify_associative_operation (code, mode, op0, op1);
1859 if (tem)
1860 return tem;
1861 break;
1863 case AND:
1864 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1865 return const0_rtx;
1866 /* If we are turning off bits already known off in OP0, we need
1867 not do an AND. */
1868 if (GET_CODE (trueop1) == CONST_INT
1869 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1870 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1871 return op0;
1872 if (trueop0 == trueop1 && ! side_effects_p (op0)
1873 && GET_MODE_CLASS (mode) != MODE_CC)
1874 return op0;
1875 /* A & (~A) -> 0 */
1876 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1877 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1878 && ! side_effects_p (op0)
1879 && GET_MODE_CLASS (mode) != MODE_CC)
1880 return const0_rtx;
1881 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1882 ((A & N) + B) & M -> (A + B) & M
1883 Similarly if (N & M) == 0,
1884 ((A | N) + B) & M -> (A + B) & M
1885 and for - instead of + and/or ^ instead of |. */
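/* Worked example (illustrative, not part of the original source):
   take M = 0xff and N = 0x1ff, so (N & M) == M.  Then
   (and (plus (and A 0x1ff) B) 0xff) becomes (and (plus A B) 0xff),
   because the low eight bits of a sum depend only on the low eight
   bits of its addends.  With N = 0x100 instead, (N & M) == 0, and
   (and (plus (ior A 0x100) B) 0xff) likewise drops the inner IOR.  */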
1886 if (GET_CODE (trueop1) == CONST_INT
1887 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1888 && ~INTVAL (trueop1)
1889 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1890 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1892 rtx pmop[2];
1893 int which;
1895 pmop[0] = XEXP (op0, 0);
1896 pmop[1] = XEXP (op0, 1);
1898 for (which = 0; which < 2; which++)
1900 tem = pmop[which];
1901 switch (GET_CODE (tem))
1903 case AND:
1904 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1905 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1906 == INTVAL (trueop1))
1907 pmop[which] = XEXP (tem, 0);
1908 break;
1909 case IOR:
1910 case XOR:
1911 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1912 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1913 pmop[which] = XEXP (tem, 0);
1914 break;
1915 default:
1916 break;
1920 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1922 tem = simplify_gen_binary (GET_CODE (op0), mode,
1923 pmop[0], pmop[1]);
1924 return simplify_gen_binary (code, mode, tem, op1);
1927 tem = simplify_associative_operation (code, mode, op0, op1);
1928 if (tem)
1929 return tem;
1930 break;
1932 case UDIV:
1933 /* 0/x is 0 (or x&0 if x has side-effects). */
1934 if (trueop0 == const0_rtx)
1935 return side_effects_p (op1)
1936 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1937 : const0_rtx;
1938 /* x/1 is x. */
1939 if (trueop1 == const1_rtx)
1941 /* Handle narrowing UDIV. */
1942 rtx x = gen_lowpart_common (mode, op0);
1943 if (x)
1944 return x;
1945 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1946 return gen_lowpart_SUBREG (mode, op0);
1947 return op0;
1949 /* Convert divide by power of two into shift. */
1950 if (GET_CODE (trueop1) == CONST_INT
1951 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1952 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1953 break;
1955 case DIV:
1956 /* Handle floating point and integers separately. */
1957 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1959 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1960 safe for modes with NaNs, since 0.0 / 0.0 will then be
1961 NaN rather than 0.0. Nor is it safe for modes with signed
1962 zeros, since dividing 0 by a negative number gives -0.0 */
1963 if (trueop0 == CONST0_RTX (mode)
1964 && !HONOR_NANS (mode)
1965 && !HONOR_SIGNED_ZEROS (mode)
1966 && ! side_effects_p (op1))
1967 return op0;
1968 /* x/1.0 is x. */
1969 if (trueop1 == CONST1_RTX (mode)
1970 && !HONOR_SNANS (mode))
1971 return op0;
1973 if (GET_CODE (trueop1) == CONST_DOUBLE
1974 && trueop1 != CONST0_RTX (mode))
1976 REAL_VALUE_TYPE d;
1977 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1979 /* x/-1.0 is -x. */
1980 if (REAL_VALUES_EQUAL (d, dconstm1)
1981 && !HONOR_SNANS (mode))
1982 return simplify_gen_unary (NEG, mode, op0, mode);
1984 /* Change FP division by a constant into multiplication.
1985 Only do this with -funsafe-math-optimizations. */
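/* For example, x / 4.0 becomes x * 0.25.  The reciprocal is not exact
   for every constant, hence the -funsafe-math-optimizations guard.  */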
1986 if (flag_unsafe_math_optimizations
1987 && !REAL_VALUES_EQUAL (d, dconst0))
1989 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1990 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1991 return simplify_gen_binary (MULT, mode, op0, tem);
1995 else
1997 /* 0/x is 0 (or x&0 if x has side-effects). */
1998 if (trueop0 == const0_rtx)
1999 return side_effects_p (op1)
2000 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2001 : const0_rtx;
2002 /* x/1 is x. */
2003 if (trueop1 == const1_rtx)
2005 /* Handle narrowing DIV. */
2006 rtx x = gen_lowpart_common (mode, op0);
2007 if (x)
2008 return x;
2009 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2010 return gen_lowpart_SUBREG (mode, op0);
2011 return op0;
2013 /* x/-1 is -x. */
2014 if (trueop1 == constm1_rtx)
2016 rtx x = gen_lowpart_common (mode, op0);
2017 if (!x)
2018 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2019 ? gen_lowpart_SUBREG (mode, op0) : op0;
2020 return simplify_gen_unary (NEG, mode, x, mode);
2023 break;
2025 case UMOD:
2026 /* 0%x is 0 (or x&0 if x has side-effects). */
2027 if (trueop0 == const0_rtx)
2028 return side_effects_p (op1)
2029 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2030 : const0_rtx;
2031 /* x%1 is 0 (or x&0 if x has side-effects). */
2032 if (trueop1 == const1_rtx)
2033 return side_effects_p (op0)
2034 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2035 : const0_rtx;
2036 /* Implement modulus by power of two as AND. */
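/* For example, the unsigned x % 16 becomes x & 15.  */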
2037 if (GET_CODE (trueop1) == CONST_INT
2038 && exact_log2 (INTVAL (trueop1)) > 0)
2039 return simplify_gen_binary (AND, mode, op0,
2040 GEN_INT (INTVAL (op1) - 1));
2041 break;
2043 case MOD:
2044 /* 0%x is 0 (or x&0 if x has side-effects). */
2045 if (trueop0 == const0_rtx)
2046 return side_effects_p (op1)
2047 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2048 : const0_rtx;
2049 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2050 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2051 return side_effects_p (op0)
2052 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2053 : const0_rtx;
2054 break;
2056 case ROTATERT:
2057 case ROTATE:
2058 case ASHIFTRT:
2059 /* Rotating ~0 always results in ~0. */
2060 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2061 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2062 && ! side_effects_p (op1))
2063 return op0;
2065 /* Fall through.... */
2067 case ASHIFT:
2068 case LSHIFTRT:
2069 if (trueop1 == const0_rtx)
2070 return op0;
2071 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2072 return op0;
2073 break;
2075 case SMIN:
2076 if (width <= HOST_BITS_PER_WIDE_INT
2077 && GET_CODE (trueop1) == CONST_INT
2078 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2079 && ! side_effects_p (op0))
2080 return op1;
2081 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2082 return op0;
2083 tem = simplify_associative_operation (code, mode, op0, op1);
2084 if (tem)
2085 return tem;
2086 break;
2088 case SMAX:
2089 if (width <= HOST_BITS_PER_WIDE_INT
2090 && GET_CODE (trueop1) == CONST_INT
2091 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2092 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2093 && ! side_effects_p (op0))
2094 return op1;
2095 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2096 return op0;
2097 tem = simplify_associative_operation (code, mode, op0, op1);
2098 if (tem)
2099 return tem;
2100 break;
2102 case UMIN:
2103 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2104 return op1;
2105 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2106 return op0;
2107 tem = simplify_associative_operation (code, mode, op0, op1);
2108 if (tem)
2109 return tem;
2110 break;
2112 case UMAX:
2113 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2114 return op1;
2115 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2116 return op0;
2117 tem = simplify_associative_operation (code, mode, op0, op1);
2118 if (tem)
2119 return tem;
2120 break;
2122 case SS_PLUS:
2123 case US_PLUS:
2124 case SS_MINUS:
2125 case US_MINUS:
2126 /* ??? There are simplifications that can be done. */
2127 return 0;
2129 case VEC_SELECT:
2130 if (!VECTOR_MODE_P (mode))
2132 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2133 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2134 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2135 gcc_assert (XVECLEN (trueop1, 0) == 1);
2136 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2138 if (GET_CODE (trueop0) == CONST_VECTOR)
2139 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2140 (trueop1, 0, 0)));
2142 else
2144 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2145 gcc_assert (GET_MODE_INNER (mode)
2146 == GET_MODE_INNER (GET_MODE (trueop0)));
2147 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2149 if (GET_CODE (trueop0) == CONST_VECTOR)
2151 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2152 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2153 rtvec v = rtvec_alloc (n_elts);
2154 unsigned int i;
2156 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2157 for (i = 0; i < n_elts; i++)
2159 rtx x = XVECEXP (trueop1, 0, i);
2161 gcc_assert (GET_CODE (x) == CONST_INT);
2162 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2163 INTVAL (x));
2166 return gen_rtx_CONST_VECTOR (mode, v);
2169 return 0;
2170 case VEC_CONCAT:
2172 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2173 ? GET_MODE (trueop0)
2174 : GET_MODE_INNER (mode));
2175 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2176 ? GET_MODE (trueop1)
2177 : GET_MODE_INNER (mode));
2179 gcc_assert (VECTOR_MODE_P (mode));
2180 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2181 == GET_MODE_SIZE (mode));
2183 if (VECTOR_MODE_P (op0_mode))
2184 gcc_assert (GET_MODE_INNER (mode)
2185 == GET_MODE_INNER (op0_mode));
2186 else
2187 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2189 if (VECTOR_MODE_P (op1_mode))
2190 gcc_assert (GET_MODE_INNER (mode)
2191 == GET_MODE_INNER (op1_mode));
2192 else
2193 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2195 if ((GET_CODE (trueop0) == CONST_VECTOR
2196 || GET_CODE (trueop0) == CONST_INT
2197 || GET_CODE (trueop0) == CONST_DOUBLE)
2198 && (GET_CODE (trueop1) == CONST_VECTOR
2199 || GET_CODE (trueop1) == CONST_INT
2200 || GET_CODE (trueop1) == CONST_DOUBLE))
2202 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2203 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2204 rtvec v = rtvec_alloc (n_elts);
2205 unsigned int i;
2206 unsigned in_n_elts = 1;
2208 if (VECTOR_MODE_P (op0_mode))
2209 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2210 for (i = 0; i < n_elts; i++)
2212 if (i < in_n_elts)
2214 if (!VECTOR_MODE_P (op0_mode))
2215 RTVEC_ELT (v, i) = trueop0;
2216 else
2217 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2219 else
2221 if (!VECTOR_MODE_P (op1_mode))
2222 RTVEC_ELT (v, i) = trueop1;
2223 else
2224 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2225 i - in_n_elts);
2229 return gen_rtx_CONST_VECTOR (mode, v);
2232 return 0;
2234 default:
2235 gcc_unreachable ();
2238 return 0;
2241 /* Get the integer argument values in two forms:
2242 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
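/* For an 8-bit mode and a value of 0xff, ARG0 is 255 while ARG0S is -1.  */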
2244 arg0 = INTVAL (trueop0);
2245 arg1 = INTVAL (trueop1);
2247 if (width < HOST_BITS_PER_WIDE_INT)
2249 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2250 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2252 arg0s = arg0;
2253 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2254 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2256 arg1s = arg1;
2257 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2258 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2260 else
2262 arg0s = arg0;
2263 arg1s = arg1;
2266 /* Compute the value of the arithmetic. */
2268 switch (code)
2270 case PLUS:
2271 val = arg0s + arg1s;
2272 break;
2274 case MINUS:
2275 val = arg0s - arg1s;
2276 break;
2278 case MULT:
2279 val = arg0s * arg1s;
2280 break;
2282 case DIV:
2283 if (arg1s == 0
2284 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2285 && arg1s == -1))
2286 return 0;
2287 val = arg0s / arg1s;
2288 break;
2290 case MOD:
2291 if (arg1s == 0
2292 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2293 && arg1s == -1))
2294 return 0;
2295 val = arg0s % arg1s;
2296 break;
2298 case UDIV:
2299 if (arg1 == 0
2300 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2301 && arg1s == -1))
2302 return 0;
2303 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2304 break;
2306 case UMOD:
2307 if (arg1 == 0
2308 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2309 && arg1s == -1))
2310 return 0;
2311 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2312 break;
2314 case AND:
2315 val = arg0 & arg1;
2316 break;
2318 case IOR:
2319 val = arg0 | arg1;
2320 break;
2322 case XOR:
2323 val = arg0 ^ arg1;
2324 break;
2326 case LSHIFTRT:
2327 case ASHIFT:
2328 case ASHIFTRT:
2329 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2330 value is in range. We can't return any old value for out-of-range
2331 arguments because either the middle-end (via shift_truncation_mask)
2332 or the back-end might be relying on target-specific knowledge.
2333 Nor can we rely on shift_truncation_mask, since the shift might
2334 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2335 if (SHIFT_COUNT_TRUNCATED)
2336 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2337 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2338 return 0;
2340 val = (code == ASHIFT
2341 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2342 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2344 /* Sign-extend the result for arithmetic right shifts. */
2345 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2346 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2347 break;
2349 case ROTATERT:
2350 if (arg1 < 0)
2351 return 0;
2353 arg1 %= width;
2354 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2355 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2356 break;
2358 case ROTATE:
2359 if (arg1 < 0)
2360 return 0;
2362 arg1 %= width;
2363 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2364 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2365 break;
2367 case COMPARE:
2368 /* Do nothing here. */
2369 return 0;
2371 case SMIN:
2372 val = arg0s <= arg1s ? arg0s : arg1s;
2373 break;
2375 case UMIN:
2376 val = ((unsigned HOST_WIDE_INT) arg0
2377 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2378 break;
2380 case SMAX:
2381 val = arg0s > arg1s ? arg0s : arg1s;
2382 break;
2384 case UMAX:
2385 val = ((unsigned HOST_WIDE_INT) arg0
2386 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2387 break;
2389 case SS_PLUS:
2390 case US_PLUS:
2391 case SS_MINUS:
2392 case US_MINUS:
2393 /* ??? There are simplifications that can be done. */
2394 return 0;
2396 default:
2397 gcc_unreachable ();
2400 val = trunc_int_for_mode (val, mode);
2402 return GEN_INT (val);
2405 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2406 PLUS or MINUS.
2408 Rather than test for specific cases, we do this by a brute-force method
2409 and do all possible simplifications until no more changes occur. Then
2410 we rebuild the operation.
2412 If FORCE is true, then always generate the rtx. This is used to
2413 canonicalize stuff emitted from simplify_gen_binary. Note that this
2414 can still fail if the rtx is too complex. It won't fail just because
2415 the result is not 'simpler' than the input, however. */
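/* As an illustration, (a - (b - c)) + d is flattened into the operand
   list +a, -b, +c, +d before the pairwise simplification and
   reassembly performed below.  */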
2417 struct simplify_plus_minus_op_data
2419 rtx op;
2420 int neg;
2423 static int
2424 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2426 const struct simplify_plus_minus_op_data *d1 = p1;
2427 const struct simplify_plus_minus_op_data *d2 = p2;
2429 return (commutative_operand_precedence (d2->op)
2430 - commutative_operand_precedence (d1->op));
2433 static rtx
2434 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2435 rtx op1, int force)
2437 struct simplify_plus_minus_op_data ops[8];
2438 rtx result, tem;
2439 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2440 int first, changed;
2441 int i, j;
2443 memset (ops, 0, sizeof ops);
2445 /* Set up the two operands and then expand them until nothing has been
2446 changed. If we run out of room in our array, give up; this should
2447 almost never happen. */
2449 ops[0].op = op0;
2450 ops[0].neg = 0;
2451 ops[1].op = op1;
2452 ops[1].neg = (code == MINUS);
2456 changed = 0;
2458 for (i = 0; i < n_ops; i++)
2460 rtx this_op = ops[i].op;
2461 int this_neg = ops[i].neg;
2462 enum rtx_code this_code = GET_CODE (this_op);
2464 switch (this_code)
2466 case PLUS:
2467 case MINUS:
2468 if (n_ops == 7)
2469 return NULL_RTX;
2471 ops[n_ops].op = XEXP (this_op, 1);
2472 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2473 n_ops++;
2475 ops[i].op = XEXP (this_op, 0);
2476 input_ops++;
2477 changed = 1;
2478 break;
2480 case NEG:
2481 ops[i].op = XEXP (this_op, 0);
2482 ops[i].neg = ! this_neg;
2483 changed = 1;
2484 break;
2486 case CONST:
2487 if (n_ops < 7
2488 && GET_CODE (XEXP (this_op, 0)) == PLUS
2489 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2490 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2492 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2493 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2494 ops[n_ops].neg = this_neg;
2495 n_ops++;
2496 input_consts++;
2497 changed = 1;
2499 break;
2501 case NOT:
2502 /* ~a -> (-a - 1) */
2503 if (n_ops != 7)
2505 ops[n_ops].op = constm1_rtx;
2506 ops[n_ops++].neg = this_neg;
2507 ops[i].op = XEXP (this_op, 0);
2508 ops[i].neg = !this_neg;
2509 changed = 1;
2511 break;
2513 case CONST_INT:
2514 if (this_neg)
2516 ops[i].op = neg_const_int (mode, this_op);
2517 ops[i].neg = 0;
2518 changed = 1;
2520 break;
2522 default:
2523 break;
2527 while (changed);
2529 /* If we only have two operands, we can't do anything. */
2530 if (n_ops <= 2 && !force)
2531 return NULL_RTX;
2533 /* Count the number of CONSTs we didn't split above. */
2534 for (i = 0; i < n_ops; i++)
2535 if (GET_CODE (ops[i].op) == CONST)
2536 input_consts++;
2538 /* Now simplify each pair of operands until nothing changes. The first
2539 time through just simplify constants against each other. */
2541 first = 1;
2544 changed = first;
2546 for (i = 0; i < n_ops - 1; i++)
2547 for (j = i + 1; j < n_ops; j++)
2549 rtx lhs = ops[i].op, rhs = ops[j].op;
2550 int lneg = ops[i].neg, rneg = ops[j].neg;
2552 if (lhs != 0 && rhs != 0
2553 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2555 enum rtx_code ncode = PLUS;
2557 if (lneg != rneg)
2559 ncode = MINUS;
2560 if (lneg)
2561 tem = lhs, lhs = rhs, rhs = tem;
2563 else if (swap_commutative_operands_p (lhs, rhs))
2564 tem = lhs, lhs = rhs, rhs = tem;
2566 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2568 /* Reject "simplifications" that just wrap the two
2569 arguments in a CONST. Failure to do so can result
2570 in infinite recursion with simplify_binary_operation
2571 when it calls us to simplify CONST operations. */
2572 if (tem
2573 && ! (GET_CODE (tem) == CONST
2574 && GET_CODE (XEXP (tem, 0)) == ncode
2575 && XEXP (XEXP (tem, 0), 0) == lhs
2576 && XEXP (XEXP (tem, 0), 1) == rhs)
2577 /* Don't allow -x + -1 -> ~x simplifications in the
2578 first pass. This allows us the chance to combine
2579 the -1 with other constants. */
2580 && ! (first
2581 && GET_CODE (tem) == NOT
2582 && XEXP (tem, 0) == rhs))
2584 lneg &= rneg;
2585 if (GET_CODE (tem) == NEG)
2586 tem = XEXP (tem, 0), lneg = !lneg;
2587 if (GET_CODE (tem) == CONST_INT && lneg)
2588 tem = neg_const_int (mode, tem), lneg = 0;
2590 ops[i].op = tem;
2591 ops[i].neg = lneg;
2592 ops[j].op = NULL_RTX;
2593 changed = 1;
2598 first = 0;
2600 while (changed);
2602 /* Pack all the operands to the lower-numbered entries. */
2603 for (i = 0, j = 0; j < n_ops; j++)
2604 if (ops[j].op)
2605 ops[i++] = ops[j];
2606 n_ops = i;
2608 /* Sort the operations based on swap_commutative_operands_p. */
2609 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2611 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2612 if (n_ops == 2
2613 && GET_CODE (ops[1].op) == CONST_INT
2614 && CONSTANT_P (ops[0].op)
2615 && ops[0].neg)
2616 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2618 /* We suppressed creation of trivial CONST expressions in the
2619 combination loop to avoid recursion. Create one manually now.
2620 The combination loop should have ensured that there is exactly
2621 one CONST_INT, and the sort will have ensured that it is last
2622 in the array and that any other constant will be next-to-last. */
2624 if (n_ops > 1
2625 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2626 && CONSTANT_P (ops[n_ops - 2].op))
2628 rtx value = ops[n_ops - 1].op;
2629 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2630 value = neg_const_int (mode, value);
2631 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2632 n_ops--;
2635 /* Count the number of CONSTs that we generated. */
2636 n_consts = 0;
2637 for (i = 0; i < n_ops; i++)
2638 if (GET_CODE (ops[i].op) == CONST)
2639 n_consts++;
2641 /* Give up if we didn't reduce the number of operands we had. Make
2642 sure we count a CONST as two operands. If we have the same
2643 number of operands, but have made more CONSTs than before, this
2644 is also an improvement, so accept it. */
2645 if (!force
2646 && (n_ops + n_consts > input_ops
2647 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2648 return NULL_RTX;
2650 /* Put a non-negated operand first, if possible. */
2652 for (i = 0; i < n_ops && ops[i].neg; i++)
2653 continue;
2654 if (i == n_ops)
2655 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2656 else if (i != 0)
2658 tem = ops[0].op;
2659 ops[0] = ops[i];
2660 ops[i].op = tem;
2661 ops[i].neg = 1;
2664 /* Now make the result by performing the requested operations. */
2665 result = ops[0].op;
2666 for (i = 1; i < n_ops; i++)
2667 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2668 mode, result, ops[i].op);
2670 return result;
2673 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2674 static bool
2675 plus_minus_operand_p (rtx x)
2677 return GET_CODE (x) == PLUS
2678 || GET_CODE (x) == MINUS
2679 || (GET_CODE (x) == CONST
2680 && GET_CODE (XEXP (x, 0)) == PLUS
2681 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2682 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2685 /* Like simplify_binary_operation except used for relational operators.
2686 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2687 not also be VOIDmode.
2689 CMP_MODE specifies the mode in which the comparison is done, so it is
2690 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2691 the operands or, if both are VOIDmode, the operands are compared in
2692 "infinite precision". */
2693 rtx
2694 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2695 enum machine_mode cmp_mode, rtx op0, rtx op1)
2697 rtx tem, trueop0, trueop1;
2699 if (cmp_mode == VOIDmode)
2700 cmp_mode = GET_MODE (op0);
2701 if (cmp_mode == VOIDmode)
2702 cmp_mode = GET_MODE (op1);
2704 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2705 if (tem)
2707 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2709 if (tem == const0_rtx)
2710 return CONST0_RTX (mode);
2711 #ifdef FLOAT_STORE_FLAG_VALUE
2713 REAL_VALUE_TYPE val;
2714 val = FLOAT_STORE_FLAG_VALUE (mode);
2715 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2717 #else
2718 return NULL_RTX;
2719 #endif
2721 if (VECTOR_MODE_P (mode))
2723 if (tem == const0_rtx)
2724 return CONST0_RTX (mode);
2725 #ifdef VECTOR_STORE_FLAG_VALUE
2727 int i, units;
2728 rtvec v;
2730 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2731 if (val == NULL_RTX)
2732 return NULL_RTX;
2733 if (val == const1_rtx)
2734 return CONST1_RTX (mode);
2736 units = GET_MODE_NUNITS (mode);
2737 v = rtvec_alloc (units);
2738 for (i = 0; i < units; i++)
2739 RTVEC_ELT (v, i) = val;
2740 return gen_rtx_raw_CONST_VECTOR (mode, v);
2742 #else
2743 return NULL_RTX;
2744 #endif
2747 return tem;
2750 /* For the following tests, ensure const0_rtx is op1. */
2751 if (swap_commutative_operands_p (op0, op1)
2752 || (op0 == const0_rtx && op1 != const0_rtx))
2753 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2755 /* If op0 is a compare, extract the comparison arguments from it. */
2756 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2757 return simplify_relational_operation (code, mode, VOIDmode,
2758 XEXP (op0, 0), XEXP (op0, 1));
2760 if (mode == VOIDmode
2761 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2762 || CC0_P (op0))
2763 return NULL_RTX;
2765 trueop0 = avoid_constant_pool_reference (op0);
2766 trueop1 = avoid_constant_pool_reference (op1);
2767 return simplify_relational_operation_1 (code, mode, cmp_mode,
2768 trueop0, trueop1);
2771 /* This part of simplify_relational_operation is only used when CMP_MODE
2772 is not in class MODE_CC (i.e. it is a real comparison).
2774 MODE is the mode of the result, while CMP_MODE specifies the mode
2775 in which the comparison is done, so it is the mode of the operands. */
2776 static rtx
2777 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2778 enum machine_mode cmp_mode, rtx op0, rtx op1)
2780 if (GET_CODE (op1) == CONST_INT)
2782 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2784 /* If op0 is a comparison, extract the comparison arguments from it. */
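/* E.g. (eq (eq a b) (const_int 0)) becomes the reversed comparison
   (ne a b); for NE the inner comparison itself is used.  */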
2785 if (code == NE)
2787 if (GET_MODE (op0) == cmp_mode)
2788 return simplify_rtx (op0);
2789 else
2790 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2791 XEXP (op0, 0), XEXP (op0, 1));
2793 else if (code == EQ)
2795 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2796 if (new_code != UNKNOWN)
2797 return simplify_gen_relational (new_code, mode, VOIDmode,
2798 XEXP (op0, 0), XEXP (op0, 1));
2803 return NULL_RTX;
2806 /* Check if the given comparison (done in the given MODE) is actually a
2807 tautology or a contradiction.
2808 If no simplification is possible, this function returns zero.
2809 Otherwise, it returns either const_true_rtx or const0_rtx. */
2811 rtx
2812 simplify_const_relational_operation (enum rtx_code code,
2813 enum machine_mode mode,
2814 rtx op0, rtx op1)
2816 int equal, op0lt, op0ltu, op1lt, op1ltu;
2817 rtx tem;
2818 rtx trueop0;
2819 rtx trueop1;
2821 gcc_assert (mode != VOIDmode
2822 || (GET_MODE (op0) == VOIDmode
2823 && GET_MODE (op1) == VOIDmode));
2825 /* If op0 is a compare, extract the comparison arguments from it. */
2826 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2827 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2829 /* We can't simplify MODE_CC values since we don't know what the
2830 actual comparison is. */
2831 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2832 return 0;
2834 /* Make sure the constant is second. */
2835 if (swap_commutative_operands_p (op0, op1))
2837 tem = op0, op0 = op1, op1 = tem;
2838 code = swap_condition (code);
2841 trueop0 = avoid_constant_pool_reference (op0);
2842 trueop1 = avoid_constant_pool_reference (op1);
2844 /* For integer comparisons of A and B maybe we can simplify A - B and can
2845 then simplify a comparison of that with zero. If A and B are both either
2846 a register or a CONST_INT, this can't help; testing for these cases will
2847 prevent infinite recursion here and speed things up.
2849 If CODE is an unsigned comparison, then we can never do this optimization,
2850 because it gives an incorrect result if the subtraction wraps around zero.
2851 ANSI C defines unsigned operations such that they never overflow, and
2852 thus such cases can not be ignored; but we cannot do it even for
2853 signed comparisons for languages such as Java, so test flag_wrapv. */
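/* For example, without -fwrapv the signed comparison (x + 4) < x
   reduces here to 4 < 0 and folds to false.  */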
2855 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2856 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2857 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2858 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2859 /* We cannot do this for == or != if tem is a nonzero address. */
2860 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2861 && code != GTU && code != GEU && code != LTU && code != LEU)
2862 return simplify_const_relational_operation (signed_condition (code),
2863 mode, tem, const0_rtx);
2865 if (flag_unsafe_math_optimizations && code == ORDERED)
2866 return const_true_rtx;
2868 if (flag_unsafe_math_optimizations && code == UNORDERED)
2869 return const0_rtx;
2871 /* For modes without NaNs, if the two operands are equal, we know the
2872 result except if they have side-effects. */
2873 if (! HONOR_NANS (GET_MODE (trueop0))
2874 && rtx_equal_p (trueop0, trueop1)
2875 && ! side_effects_p (trueop0))
2876 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2878 /* If the operands are floating-point constants, see if we can fold
2879 the result. */
2880 else if (GET_CODE (trueop0) == CONST_DOUBLE
2881 && GET_CODE (trueop1) == CONST_DOUBLE
2882 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2884 REAL_VALUE_TYPE d0, d1;
2886 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2887 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2889 /* Comparisons are unordered iff at least one of the values is NaN. */
2890 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2891 switch (code)
2893 case UNEQ:
2894 case UNLT:
2895 case UNGT:
2896 case UNLE:
2897 case UNGE:
2898 case NE:
2899 case UNORDERED:
2900 return const_true_rtx;
2901 case EQ:
2902 case LT:
2903 case GT:
2904 case LE:
2905 case GE:
2906 case LTGT:
2907 case ORDERED:
2908 return const0_rtx;
2909 default:
2910 return 0;
2913 equal = REAL_VALUES_EQUAL (d0, d1);
2914 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2915 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2918 /* Otherwise, see if the operands are both integers. */
2919 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2920 && (GET_CODE (trueop0) == CONST_DOUBLE
2921 || GET_CODE (trueop0) == CONST_INT)
2922 && (GET_CODE (trueop1) == CONST_DOUBLE
2923 || GET_CODE (trueop1) == CONST_INT))
2925 int width = GET_MODE_BITSIZE (mode);
2926 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2927 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2929 /* Get the two words comprising each integer constant. */
2930 if (GET_CODE (trueop0) == CONST_DOUBLE)
2932 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2933 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2935 else
2937 l0u = l0s = INTVAL (trueop0);
2938 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2941 if (GET_CODE (trueop1) == CONST_DOUBLE)
2943 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2944 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2946 else
2948 l1u = l1s = INTVAL (trueop1);
2949 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2952 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2953 we have to sign or zero-extend the values. */
2954 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2956 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2957 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2959 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2960 l0s |= ((HOST_WIDE_INT) (-1) << width);
2962 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2963 l1s |= ((HOST_WIDE_INT) (-1) << width);
2965 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2966 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2968 equal = (h0u == h1u && l0u == l1u);
2969 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2970 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2971 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2972 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2975 /* Otherwise, there are some code-specific tests we can make. */
2976 else
2978 /* Optimize comparisons with upper and lower bounds. */
2979 if (SCALAR_INT_MODE_P (mode)
2980 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2982 rtx mmin, mmax;
2983 int sign;
2985 if (code == GEU
2986 || code == LEU
2987 || code == GTU
2988 || code == LTU)
2989 sign = 0;
2990 else
2991 sign = 1;
2993 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
2995 tem = NULL_RTX;
2996 switch (code)
2998 case GEU:
2999 case GE:
3000 /* x >= min is always true. */
3001 if (rtx_equal_p (trueop1, mmin))
3002 tem = const_true_rtx;
3003 else
3004 break;
3006 case LEU:
3007 case LE:
3008 /* x <= max is always true. */
3009 if (rtx_equal_p (trueop1, mmax))
3010 tem = const_true_rtx;
3011 break;
3013 case GTU:
3014 case GT:
3015 /* x > max is always false. */
3016 if (rtx_equal_p (trueop1, mmax))
3017 tem = const0_rtx;
3018 break;
3020 case LTU:
3021 case LT:
3022 /* x < min is always false. */
3023 if (rtx_equal_p (trueop1, mmin))
3024 tem = const0_rtx;
3025 break;
3027 default:
3028 break;
3030 if (tem == const0_rtx
3031 || tem == const_true_rtx)
3032 return tem;
3035 switch (code)
3037 case EQ:
3038 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3039 return const0_rtx;
3040 break;
3042 case NE:
3043 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3044 return const_true_rtx;
3045 break;
3047 case LT:
3048 /* Optimize abs(x) < 0.0. */
3049 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3051 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3052 : trueop0;
3053 if (GET_CODE (tem) == ABS)
3054 return const0_rtx;
3056 break;
3058 case GE:
3059 /* Optimize abs(x) >= 0.0. */
3060 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3062 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3063 : trueop0;
3064 if (GET_CODE (tem) == ABS)
3065 return const_true_rtx;
3067 break;
3069 case UNGE:
3070 /* Optimize ! (abs(x) < 0.0). */
3071 if (trueop1 == CONST0_RTX (mode))
3073 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3074 : trueop0;
3075 if (GET_CODE (tem) == ABS)
3076 return const_true_rtx;
3078 break;
3080 default:
3081 break;
3084 return 0;
3087 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3088 as appropriate. */
3089 switch (code)
3091 case EQ:
3092 case UNEQ:
3093 return equal ? const_true_rtx : const0_rtx;
3094 case NE:
3095 case LTGT:
3096 return ! equal ? const_true_rtx : const0_rtx;
3097 case LT:
3098 case UNLT:
3099 return op0lt ? const_true_rtx : const0_rtx;
3100 case GT:
3101 case UNGT:
3102 return op1lt ? const_true_rtx : const0_rtx;
3103 case LTU:
3104 return op0ltu ? const_true_rtx : const0_rtx;
3105 case GTU:
3106 return op1ltu ? const_true_rtx : const0_rtx;
3107 case LE:
3108 case UNLE:
3109 return equal || op0lt ? const_true_rtx : const0_rtx;
3110 case GE:
3111 case UNGE:
3112 return equal || op1lt ? const_true_rtx : const0_rtx;
3113 case LEU:
3114 return equal || op0ltu ? const_true_rtx : const0_rtx;
3115 case GEU:
3116 return equal || op1ltu ? const_true_rtx : const0_rtx;
3117 case ORDERED:
3118 return const_true_rtx;
3119 case UNORDERED:
3120 return const0_rtx;
3121 default:
3122 gcc_unreachable ();
3126 /* Simplify CODE, an operation with result mode MODE and three operands,
3127 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3128 a constant. Return 0 if no simplification is possible. */
3130 rtx
3131 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3132 enum machine_mode op0_mode, rtx op0, rtx op1,
3133 rtx op2)
3135 unsigned int width = GET_MODE_BITSIZE (mode);
3137 /* VOIDmode means "infinite" precision. */
3138 if (width == 0)
3139 width = HOST_BITS_PER_WIDE_INT;
3141 switch (code)
3143 case SIGN_EXTRACT:
3144 case ZERO_EXTRACT:
3145 if (GET_CODE (op0) == CONST_INT
3146 && GET_CODE (op1) == CONST_INT
3147 && GET_CODE (op2) == CONST_INT
3148 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3149 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3151 /* Extracting a bit-field from a constant */
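/* For example, with BITS_BIG_ENDIAN clear, extracting 4 bits at
   position 1 from the constant 0b110110 gives 0b1011 (11);
   SIGN_EXTRACT then sign-extends from bit 3, yielding -5.  */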
3152 HOST_WIDE_INT val = INTVAL (op0);
3154 if (BITS_BIG_ENDIAN)
3155 val >>= (GET_MODE_BITSIZE (op0_mode)
3156 - INTVAL (op2) - INTVAL (op1));
3157 else
3158 val >>= INTVAL (op2);
3160 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3162 /* First zero-extend. */
3163 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3164 /* If desired, propagate sign bit. */
3165 if (code == SIGN_EXTRACT
3166 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3167 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3170 /* Clear the bits that don't belong in our mode,
3171 unless they and our sign bit are all one.
3172 So we get either a reasonable negative value or a reasonable
3173 unsigned value for this mode. */
3174 if (width < HOST_BITS_PER_WIDE_INT
3175 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3176 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3177 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3179 return gen_int_mode (val, mode);
3181 break;
3183 case IF_THEN_ELSE:
3184 if (GET_CODE (op0) == CONST_INT)
3185 return op0 != const0_rtx ? op1 : op2;
3187 /* Convert c ? a : a into "a". */
3188 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3189 return op1;
3191 /* Convert a != b ? a : b into "a". */
3192 if (GET_CODE (op0) == NE
3193 && ! side_effects_p (op0)
3194 && ! HONOR_NANS (mode)
3195 && ! HONOR_SIGNED_ZEROS (mode)
3196 && ((rtx_equal_p (XEXP (op0, 0), op1)
3197 && rtx_equal_p (XEXP (op0, 1), op2))
3198 || (rtx_equal_p (XEXP (op0, 0), op2)
3199 && rtx_equal_p (XEXP (op0, 1), op1))))
3200 return op1;
3202 /* Convert a == b ? a : b into "b". */
3203 if (GET_CODE (op0) == EQ
3204 && ! side_effects_p (op0)
3205 && ! HONOR_NANS (mode)
3206 && ! HONOR_SIGNED_ZEROS (mode)
3207 && ((rtx_equal_p (XEXP (op0, 0), op1)
3208 && rtx_equal_p (XEXP (op0, 1), op2))
3209 || (rtx_equal_p (XEXP (op0, 0), op2)
3210 && rtx_equal_p (XEXP (op0, 1), op1))))
3211 return op2;
3213 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3215 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3216 ? GET_MODE (XEXP (op0, 1))
3217 : GET_MODE (XEXP (op0, 0)));
3218 rtx temp;
3220 /* Look for happy constants in op1 and op2. */
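/* E.g. on a target where STORE_FLAG_VALUE is 1,
   (if_then_else (lt a b) (const_int 1) (const_int 0))
   simplifies to just (lt a b).  */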
3221 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3223 HOST_WIDE_INT t = INTVAL (op1);
3224 HOST_WIDE_INT f = INTVAL (op2);
3226 if (t == STORE_FLAG_VALUE && f == 0)
3227 code = GET_CODE (op0);
3228 else if (t == 0 && f == STORE_FLAG_VALUE)
3230 enum rtx_code tmp;
3231 tmp = reversed_comparison_code (op0, NULL_RTX);
3232 if (tmp == UNKNOWN)
3233 break;
3234 code = tmp;
3236 else
3237 break;
3239 return simplify_gen_relational (code, mode, cmp_mode,
3240 XEXP (op0, 0), XEXP (op0, 1));
3243 if (cmp_mode == VOIDmode)
3244 cmp_mode = op0_mode;
3245 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3246 cmp_mode, XEXP (op0, 0),
3247 XEXP (op0, 1));
3249 /* See if any simplifications were possible. */
3250 if (temp)
3252 if (GET_CODE (temp) == CONST_INT)
3253 return temp == const0_rtx ? op2 : op1;
3254 else if (temp)
3255 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3258 break;
3260 case VEC_MERGE:
3261 gcc_assert (GET_MODE (op0) == mode);
3262 gcc_assert (GET_MODE (op1) == mode);
3263 gcc_assert (VECTOR_MODE_P (mode));
3264 op2 = avoid_constant_pool_reference (op2);
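/* OP2 is a bit mask: a set bit I selects element I from OP0, a clear
   bit selects it from OP1.  With four elements and OP2 == 5, elements
   0 and 2 come from OP0 and elements 1 and 3 from OP1.  */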
3265 if (GET_CODE (op2) == CONST_INT)
3267 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3268 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3269 int mask = (1 << n_elts) - 1;
3271 if (!(INTVAL (op2) & mask))
3272 return op1;
3273 if ((INTVAL (op2) & mask) == mask)
3274 return op0;
3276 op0 = avoid_constant_pool_reference (op0);
3277 op1 = avoid_constant_pool_reference (op1);
3278 if (GET_CODE (op0) == CONST_VECTOR
3279 && GET_CODE (op1) == CONST_VECTOR)
3281 rtvec v = rtvec_alloc (n_elts);
3282 unsigned int i;
3284 for (i = 0; i < n_elts; i++)
3285 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3286 ? CONST_VECTOR_ELT (op0, i)
3287 : CONST_VECTOR_ELT (op1, i));
3288 return gen_rtx_CONST_VECTOR (mode, v);
3291 break;
3293 default:
3294 gcc_unreachable ();
3297 return 0;
3300 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3301 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3303 Works by unpacking OP into a collection of 8-bit values
3304 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3305 and then repacking them again for OUTERMODE. */
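/* For example, on a little-endian target (subreg:QI (const_int 0x1234) 0)
   with HImode inner mode unpacks to the bytes {0x34, 0x12}, selects byte 0,
   and repacks it as (const_int 0x34).  */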
3307 static rtx
3308 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3309 enum machine_mode innermode, unsigned int byte)
3311 /* We support up to 512-bit values (for V8DFmode). */
3312 enum {
3313 max_bitsize = 512,
3314 value_bit = 8,
3315 value_mask = (1 << value_bit) - 1
3317 unsigned char value[max_bitsize / value_bit];
3318 int value_start;
3319 int i;
3320 int elem;
3322 int num_elem;
3323 rtx * elems;
3324 int elem_bitsize;
3325 rtx result_s;
3326 rtvec result_v = NULL;
3327 enum mode_class outer_class;
3328 enum machine_mode outer_submode;
3330 /* Some ports misuse CCmode. */
3331 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3332 return op;
3334 /* We have no way to represent a complex constant at the rtl level. */
3335 if (COMPLEX_MODE_P (outermode))
3336 return NULL_RTX;
3338 /* Unpack the value. */
3340 if (GET_CODE (op) == CONST_VECTOR)
3342 num_elem = CONST_VECTOR_NUNITS (op);
3343 elems = &CONST_VECTOR_ELT (op, 0);
3344 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3346 else
3348 num_elem = 1;
3349 elems = &op;
3350 elem_bitsize = max_bitsize;
3352 /* If this asserts, it is too complicated; reducing value_bit may help. */
3353 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3354 /* I don't know how to handle endianness of sub-units. */
3355 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3357 for (elem = 0; elem < num_elem; elem++)
3359 unsigned char * vp;
3360 rtx el = elems[elem];
3362 /* Vectors are kept in target memory order. (This is probably
3363 a mistake.) */
3365 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3366 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3367 / BITS_PER_UNIT);
3368 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3369 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3370 unsigned bytele = (subword_byte % UNITS_PER_WORD
3371 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3372 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3375 switch (GET_CODE (el))
3377 case CONST_INT:
3378 for (i = 0;
3379 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3380 i += value_bit)
3381 *vp++ = INTVAL (el) >> i;
3382 /* CONST_INTs are always logically sign-extended. */
3383 for (; i < elem_bitsize; i += value_bit)
3384 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3385 break;
3387 case CONST_DOUBLE:
3388 if (GET_MODE (el) == VOIDmode)
3390 /* If this triggers, someone should have generated a
3391 CONST_INT instead. */
3392 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3394 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3395 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3396 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3398 *vp++
3399 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3400 i += value_bit;
3402 /* It shouldn't matter what's done here, so fill it with
3403 zero. */
3404 for (; i < max_bitsize; i += value_bit)
3405 *vp++ = 0;
3407 else
3409 long tmp[max_bitsize / 32];
3410 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3412 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3413 gcc_assert (bitsize <= elem_bitsize);
3414 gcc_assert (bitsize % value_bit == 0);
3416 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3417 GET_MODE (el));
3419 /* real_to_target produces its result in words affected by
3420 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3421 and use WORDS_BIG_ENDIAN instead; see the documentation
3422 of SUBREG in rtl.texi. */
3423 for (i = 0; i < bitsize; i += value_bit)
3425 int ibase;
3426 if (WORDS_BIG_ENDIAN)
3427 ibase = bitsize - 1 - i;
3428 else
3429 ibase = i;
3430 *vp++ = tmp[ibase / 32] >> i % 32;
3433 /* It shouldn't matter what's done here, so fill it with
3434 zero. */
3435 for (; i < elem_bitsize; i += value_bit)
3436 *vp++ = 0;
3438 break;
3440 default:
3441 gcc_unreachable ();
3445 /* Now, pick the right byte to start with. */
3446 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3447 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3448 will already have offset 0. */
3449 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3451 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3452 - byte);
3453 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3454 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3455 byte = (subword_byte % UNITS_PER_WORD
3456 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3459 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3460 so if it's become negative it will instead be very large.) */
3461 gcc_assert (byte < GET_MODE_SIZE (innermode));
3463 /* Convert from bytes to chunks of size value_bit. */
3464 value_start = byte * (BITS_PER_UNIT / value_bit);
3466 /* Re-pack the value. */
3468 if (VECTOR_MODE_P (outermode))
3470 num_elem = GET_MODE_NUNITS (outermode);
3471 result_v = rtvec_alloc (num_elem);
3472 elems = &RTVEC_ELT (result_v, 0);
3473 outer_submode = GET_MODE_INNER (outermode);
3475 else
3477 num_elem = 1;
3478 elems = &result_s;
3479 outer_submode = outermode;
3482 outer_class = GET_MODE_CLASS (outer_submode);
3483 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3485 gcc_assert (elem_bitsize % value_bit == 0);
3486 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3488 for (elem = 0; elem < num_elem; elem++)
3490 unsigned char *vp;
3492 /* Vectors are stored in target memory order. (This is probably
3493 a mistake.) */
3495 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3496 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3497 / BITS_PER_UNIT);
3498 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3499 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3500 unsigned bytele = (subword_byte % UNITS_PER_WORD
3501 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3502 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3505 switch (outer_class)
3507 case MODE_INT:
3508 case MODE_PARTIAL_INT:
3510 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3512 for (i = 0;
3513 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3514 i += value_bit)
3515 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3516 for (; i < elem_bitsize; i += value_bit)
3517 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3518 << (i - HOST_BITS_PER_WIDE_INT));
3520 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3521 know why. */
3522 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3523 elems[elem] = gen_int_mode (lo, outer_submode);
3524 else
3525 elems[elem] = immed_double_const (lo, hi, outer_submode);
3527 break;
3529 case MODE_FLOAT:
3531 REAL_VALUE_TYPE r;
3532 long tmp[max_bitsize / 32];
3534 /* real_from_target wants its input in words affected by
3535 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3536 and use WORDS_BIG_ENDIAN instead; see the documentation
3537 of SUBREG in rtl.texi. */
3538 for (i = 0; i < max_bitsize / 32; i++)
3539 tmp[i] = 0;
3540 for (i = 0; i < elem_bitsize; i += value_bit)
3542 int ibase;
3543 if (WORDS_BIG_ENDIAN)
3544 ibase = elem_bitsize - 1 - i;
3545 else
3546 ibase = i;
3547 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3550 real_from_target (&r, tmp, outer_submode);
3551 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3553 break;
3555 default:
3556 gcc_unreachable ();
3559 if (VECTOR_MODE_P (outermode))
3560 return gen_rtx_CONST_VECTOR (outermode, result_v);
3561 else
3562 return result_s;
3565 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3566 Return 0 if no simplifications are possible. */
3567 rtx
3568 simplify_subreg (enum machine_mode outermode, rtx op,
3569 enum machine_mode innermode, unsigned int byte)
3571 /* Little bit of sanity checking. */
3572 gcc_assert (innermode != VOIDmode);
3573 gcc_assert (outermode != VOIDmode);
3574 gcc_assert (innermode != BLKmode);
3575 gcc_assert (outermode != BLKmode);
3577 gcc_assert (GET_MODE (op) == innermode
3578 || GET_MODE (op) == VOIDmode);
3580 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3581 gcc_assert (byte < GET_MODE_SIZE (innermode));
3583 if (outermode == innermode && !byte)
3584 return op;
3586 if (GET_CODE (op) == CONST_INT
3587 || GET_CODE (op) == CONST_DOUBLE
3588 || GET_CODE (op) == CONST_VECTOR)
3589 return simplify_immed_subreg (outermode, op, innermode, byte);
3591 /* Changing mode twice with SUBREG => just change it once,
3592 or not at all if changing back to op's starting mode. */
3593 if (GET_CODE (op) == SUBREG)
3595 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3596 int final_offset = byte + SUBREG_BYTE (op);
3597 rtx newx;
3599 if (outermode == innermostmode
3600 && byte == 0 && SUBREG_BYTE (op) == 0)
3601 return SUBREG_REG (op);
3603 /* The SUBREG_BYTE represents offset, as if the value were stored
3604 in memory. An irritating exception is a paradoxical subreg, where
3605 we define SUBREG_BYTE to be 0. On big endian machines, this
3606 value should be negative. For a moment, undo this exception. */
3607 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3609 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3610 if (WORDS_BIG_ENDIAN)
3611 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3612 if (BYTES_BIG_ENDIAN)
3613 final_offset += difference % UNITS_PER_WORD;
3615 if (SUBREG_BYTE (op) == 0
3616 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3618 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3619 if (WORDS_BIG_ENDIAN)
3620 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3621 if (BYTES_BIG_ENDIAN)
3622 final_offset += difference % UNITS_PER_WORD;
3625 /* See whether resulting subreg will be paradoxical. */
3626 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3628 /* In nonparadoxical subregs we can't handle negative offsets. */
3629 if (final_offset < 0)
3630 return NULL_RTX;
3631 /* Bail out in case resulting subreg would be incorrect. */
3632 if (final_offset % GET_MODE_SIZE (outermode)
3633 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3634 return NULL_RTX;
3636 else
3638 int offset = 0;
3639 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3641 /* In a paradoxical subreg, see if we are still looking at the lower part.
3642 If so, our SUBREG_BYTE will be 0. */
3643 if (WORDS_BIG_ENDIAN)
3644 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3645 if (BYTES_BIG_ENDIAN)
3646 offset += difference % UNITS_PER_WORD;
3647 if (offset == final_offset)
3648 final_offset = 0;
3649 else
3650 return NULL_RTX;
3653 /* Recurse for further possible simplifications. */
3654 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3655 final_offset);
3656 if (newx)
3657 return newx;
3658 if (validate_subreg (outermode, innermostmode,
3659 SUBREG_REG (op), final_offset))
3660 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3661 return NULL_RTX;
3664 /* SUBREG of a hard register => just change the register number
3665 and/or mode. If the hard register is not valid in that mode,
3666 suppress this simplification. If the hard register is the stack,
3667 frame, or argument pointer, leave this as a SUBREG. */
3669 if (REG_P (op)
3670 && REGNO (op) < FIRST_PSEUDO_REGISTER
3671 #ifdef CANNOT_CHANGE_MODE_CLASS
3672 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3673 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3674 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3675 #endif
3676 && ((reload_completed && !frame_pointer_needed)
3677 || (REGNO (op) != FRAME_POINTER_REGNUM
3678 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3679 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3680 #endif
3682 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3683 && REGNO (op) != ARG_POINTER_REGNUM
3684 #endif
3685 && REGNO (op) != STACK_POINTER_REGNUM
3686 && subreg_offset_representable_p (REGNO (op), innermode,
3687 byte, outermode))
3689 unsigned int regno = REGNO (op);
3690 unsigned int final_regno
3691 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3693 /* ??? We do allow it if the current REG is not valid for
3694 its mode. This is a kludge to work around how float/complex
3695 arguments are passed on 32-bit SPARC and should be fixed. */
3696 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3697 || ! HARD_REGNO_MODE_OK (regno, innermode))
3699 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3701 /* Propagate original regno. We don't have any way to specify
3702 the offset inside original regno, so do so only for lowpart.
3703 The information is used only by alias analysis that can not
3704 grok partial registers anyway. */
3706 if (subreg_lowpart_offset (outermode, innermode) == byte)
3707 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3708 return x;
3712 /* If we have a SUBREG of a register that we are replacing and we are
3713 replacing it with a MEM, make a new MEM and try replacing the
3714 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3715 or if we would be widening it. */
3717 if (MEM_P (op)
3718 && ! mode_dependent_address_p (XEXP (op, 0))
3719 /* Allow splitting of volatile memory references in case we don't
3720 have an instruction to move the whole thing. */
3721 && (! MEM_VOLATILE_P (op)
3722 || ! have_insn_for (SET, innermode))
3723 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3724 return adjust_address_nv (op, outermode, byte);
3726 /* Handle complex values represented as CONCAT
3727 of real and imaginary part. */
3728 if (GET_CODE (op) == CONCAT)
3730 unsigned int inner_size, final_offset;
3731 rtx part, res;
3733 inner_size = GET_MODE_UNIT_SIZE (innermode);
3734 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3735 final_offset = byte % inner_size;
3736 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3737 return NULL_RTX;
3739 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3740 if (res)
3741 return res;
3742 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3743 return gen_rtx_SUBREG (outermode, part, final_offset);
3744 return NULL_RTX;
3747 /* Optimize SUBREG truncations of zero and sign extended values. */
3748 if ((GET_CODE (op) == ZERO_EXTEND
3749 || GET_CODE (op) == SIGN_EXTEND)
3750 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3752 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3754 /* If we're requesting the lowpart of a zero or sign extension,
3755 there are three possibilities. If the outermode is the same
3756 as the origmode, we can omit both the extension and the subreg.
3757 If the outermode is not larger than the origmode, we can apply
3758 the truncation without the extension. Finally, if the outermode
3759 is larger than the origmode, but both are integer modes, we
3760 can just extend to the appropriate mode. */
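/* For instance, taking the lowpart HImode subreg of
   (zero_extend:SI (reg:HI x)) simplifies back to (reg:HI x).  */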
3761 if (bitpos == 0)
3763 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3764 if (outermode == origmode)
3765 return XEXP (op, 0);
3766 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3767 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3768 subreg_lowpart_offset (outermode,
3769 origmode));
3770 if (SCALAR_INT_MODE_P (outermode))
3771 return simplify_gen_unary (GET_CODE (op), outermode,
3772 XEXP (op, 0), origmode);
3775 /* A SUBREG resulting from a zero extension may fold to zero if
3776 it extracts higher bits than the ZERO_EXTEND's source provides. */
3777 if (GET_CODE (op) == ZERO_EXTEND
3778 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3779 return CONST0_RTX (outermode);
3782 return NULL_RTX;
3785 /* Make a SUBREG operation or equivalent if it folds. */
3787 rtx
3788 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3789 enum machine_mode innermode, unsigned int byte)
3791 rtx newx;
3793 newx = simplify_subreg (outermode, op, innermode, byte);
3794 if (newx)
3795 return newx;
3797 if (GET_CODE (op) == SUBREG
3798 || GET_CODE (op) == CONCAT
3799 || GET_MODE (op) == VOIDmode)
3800 return NULL_RTX;
3802 if (validate_subreg (outermode, innermode, op, byte))
3803 return gen_rtx_SUBREG (outermode, op, byte);
3805 return NULL_RTX;
3808 /* Simplify X, an rtx expression.
3810 Return the simplified expression or NULL if no simplifications
3811 were possible.
3813 This is the preferred entry point into the simplification routines;
3814 however, we still allow passes to call the more specific routines.
3816 Right now GCC has three (yes, three) major bodies of RTL simplification
3817 code that need to be unified.
3819 1. fold_rtx in cse.c. This code uses various CSE specific
3820 information to aid in RTL simplification.
3822 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3823 it uses combine specific information to aid in RTL
3824 simplification.
3826 3. The routines in this file.
3829 Long term we want to only have one body of simplification code; to
3830 get to that state I recommend the following steps:
3832 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3833 which do not depend on pass-specific state into these routines.
3835 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3836 use this routine whenever possible.
3838 3. Allow for pass dependent state to be provided to these
3839 routines and add simplifications based on the pass dependent
3840 state. Remove code from cse.c & combine.c that becomes
3841 redundant/dead.
3843 It will take time, but ultimately the compiler will be easier to
3844 maintain and improve. It's totally silly that when we add a
3845 simplification it needs to be added to 4 places (3 for RTL
3846 simplification and 1 for tree simplification). */
3848 rtx
3849 simplify_rtx (rtx x)
3851 enum rtx_code code = GET_CODE (x);
3852 enum machine_mode mode = GET_MODE (x);
3854 switch (GET_RTX_CLASS (code))
3856 case RTX_UNARY:
3857 return simplify_unary_operation (code, mode,
3858 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3859 case RTX_COMM_ARITH:
3860 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3861 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3863 /* Fall through.... */
3865 case RTX_BIN_ARITH:
3866 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3868 case RTX_TERNARY:
3869 case RTX_BITFIELD_OPS:
3870 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3871 XEXP (x, 0), XEXP (x, 1),
3872 XEXP (x, 2));
3874 case RTX_COMPARE:
3875 case RTX_COMM_COMPARE:
3876 return simplify_relational_operation (code, mode,
3877 ((GET_MODE (XEXP (x, 0))
3878 != VOIDmode)
3879 ? GET_MODE (XEXP (x, 0))
3880 : GET_MODE (XEXP (x, 1))),
3881 XEXP (x, 0),
3882 XEXP (x, 1));
3884 case RTX_EXTRA:
3885 if (code == SUBREG)
3886 return simplify_gen_subreg (mode, SUBREG_REG (x),
3887 GET_MODE (SUBREG_REG (x)),
3888 SUBREG_BYTE (x));
3889 break;
3891 case RTX_OBJ:
3892 if (code == LO_SUM)
3894 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3895 if (GET_CODE (XEXP (x, 0)) == HIGH
3896 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3897 return XEXP (x, 1);
3899 break;
3901 default:
3902 break;
3904 return NULL;