/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
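
/* Editor's illustration (not part of the original file): a minimal,
   self-contained sketch of how HWI_SIGN_EXTEND fills in the high half of
   a (low, high) pair.  `long long' stands in for HOST_WIDE_INT, which is
   an assumption made only for the sake of the example.  */
#if 0
#include <assert.h>

typedef long long hwi;          /* stand-in for HOST_WIDE_INT */
#define SIGN_EXTEND_HI(low) ((((hwi) (low)) < 0) ? ((hwi) -1) : ((hwi) 0))

int
main (void)
{
  /* A negative low word yields an all-ones high word...  */
  assert (SIGN_EXTEND_HI ((hwi) -5) == (hwi) -1);
  /* ...and a non-negative low word yields a zero high word.  */
  assert (SIGN_EXTEND_HI ((hwi) 5) == (hwi) 0);
  return 0;
}
#endif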

static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
        {
          code = new;
          mode = cmp_mode;
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
        }
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
        rtx temp = simplify_gen_relational (code, mode,
                                            (op_mode != VOIDmode
                                             ? op_mode
                                             : GET_MODE (op0) != VOIDmode
                                               ? GET_MODE (op0)
                                               : GET_MODE (op1)),
                                            op0, op1);
#ifdef FLOAT_STORE_FLAG_VALUE
        if (GET_MODE_CLASS (mode) == MODE_FLOAT)
          {
            if (temp == const0_rtx)
              temp = CONST0_RTX (mode);
            else if (temp == const_true_rtx)
              temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
                                                   mode);
          }
#endif
        return temp;
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    case 'o':
      if (code == MEM)
        return replace_equiv_address_nv (x,
                                         simplify_replace_rtx (XEXP (x, 0),
                                                               old, new));
      else if (code == LO_SUM)
        {
          rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }

      return x;

    default:
      return x;
    }
  return x;
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;
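
        /* Editor's illustration (not part of the original file): why
           (x & -x) isolates the lowest set bit, which is what the FFS
           fold above relies on.  A standalone sketch; `long' stands in
           for HOST_WIDE_INT, an assumption made for the example.  */
#if 0
#include <assert.h>

int
main (void)
{
  long x = 0x58;                /* binary 0101 1000 */
  /* -x is ~x + 1, so all bits below the lowest set bit flip back to 0
     and the lowest set bit itself survives the AND.  */
  assert ((x & -x) == 0x8);
  /* With a zero input the AND stays zero, and exact_log2 (0) + 1
     yields 0, the documented FFS result for zero.  */
  assert ((0L & -0L) == 0);
  return 0;
}
#endif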

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;
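
        /* Editor's illustration (not part of the original file): the
           POPCOUNT and PARITY loops above use Kernighan's trick, where
           x &= x - 1 clears exactly one set bit per iteration.  A
           standalone sketch, assuming `unsigned long' is wide enough
           for the values of interest.  */
#if 0
#include <assert.h>

static int
popcount_kernighan (unsigned long x)
{
  int n = 0;
  while (x)
    {
      x &= x - 1;               /* clear the lowest set bit */
      n++;
    }
  return n;
}

int
main (void)
{
  assert (popcount_kernighan (0UL) == 0);
  assert (popcount_kernighan (0xF0UL) == 4);
  /* Parity is just the low bit of the population count.  */
  assert ((popcount_kernighan (0x7UL) & 1) == 1);
  return 0;
}
#endif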

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 == 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          break;

        case CTZ:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = GET_MODE_BITSIZE (mode);
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
            }
          else
            lv = exact_log2 (l1 & -l1);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;

        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
        {
        case FIX:          i = REAL_VALUE_FIX (d);          break;
        case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
        default:
          abort ();
        }
      return gen_int_mode (i, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return gen_rtx_fmt_ee (reversed,
                                   op_mode, XEXP (op, 0), XEXP (op, 1));
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}

/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
         || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
        return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
                                         XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
        return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
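
/* Editor's illustration (not part of the original file): the four
   reassociation rules above, shown on plain integers where associativity
   holds exactly.  All names here are invented for the sketch.  */
#if 0
#include <assert.h>

int
main (void)
{
  int x = 10, y = 20, c1 = 3, c2 = 4;

  /* (x op c1) op c2  ->  x op (c1 op c2): the constants fold, leaving
     one runtime operation instead of two.  */
  assert (((x + c1) + c2) == (x + (c1 + c2)));
  /* (x op c1) op (y op c2)  ->  (x op y) op (c1 op c2).  */
  assert (((x + c1) + (y + c2)) == ((x + y) + (c1 + c2)));
  /* The two canonicalizations just push constants outward so the folds
     above get a chance to fire later.  */
  assert (((x + c1) + y) == ((x + y) + c1));
  assert ((x + (y + c2)) == ((x + y) + c2));
  return 0;
}
#endif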

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
          && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
        return 0;

      if (code == DIV
          && REAL_VALUES_EQUAL (f1, dconst0)
          && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:  case ASHIFTRT:
        case ASHIFT:
        case ROTATE:    case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
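
/* Editor's illustration (not part of the original file): the (low, high)
   pair addition that add_double performs, sketched with two 64-bit
   halves.  `unsigned long long' stands in for the unsigned wide int and
   the helper name is invented for the example.  */
#if 0
#include <assert.h>

static void
add_pair (unsigned long long l1, long long h1,
          unsigned long long l2, long long h2,
          unsigned long long *lv, long long *hv)
{
  *lv = l1 + l2;
  /* Unsigned overflow of the low half wraps around, so a carry has
     occurred exactly when the sum is smaller than an addend.  */
  *hv = h1 + h2 + (*lv < l1);
}

int
main (void)
{
  unsigned long long lv;
  long long hv;

  /* 0xFFFF...F + 1 carries into the high word.  */
  add_pair (~0ULL, 0, 1ULL, 0, &lv, &hv);
  assert (lv == 0 && hv == 1);
  return 0;
}
#endif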

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a  */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y)  */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;
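
        /* Editor's illustration (not part of the original file): a quick
           check of the (x - (x & y)) -> (x & ~y) identity used just
           above.  Every bit of (x & y) is also set in x, so the
           subtraction never borrows and simply clears those bits.  */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned x = 0xDEADBEEFu, y = 0x12345678u;
  assert ((x - (x & y)) == (x & ~y));
  return 0;
}
#endif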

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
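
        /* Editor's illustration (not part of the original file): the
           power-of-two strength reduction above, on plain unsigned
           values.  The exact_log2 stand-in below is invented for the
           sketch.  */
#if 0
#include <assert.h>

static int
log2_exact (unsigned long v)    /* -1 unless v is a power of two */
{
  int n = 0;
  if (v == 0 || (v & (v - 1)) != 0)
    return -1;
  while ((v >>= 1) != 0)
    n++;
  return n;
}

int
main (void)
{
  unsigned long x = 37;
  assert (log2_exact (8) == 3 && log2_exact (12) == -1);
  /* x * 8 becomes x << 3 once the constant is seen to be 2^3.  */
  assert (x * 8 == x << 3);
  return 0;
}
#endif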

          /* x*2 is x+x and x*(-1) is -x  */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));

          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;
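
        /* Editor's illustration (not part of the original file): the two
           unsigned strength reductions above, on plain unsigned values.
           Both rely on the divisor being an exact power of two.  */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned x = 12345;
  /* x / 2^k  ==  x >> k  for unsigned x.  */
  assert (x / 16 == x >> 4);
  /* x % 2^k  ==  x & (2^k - 1)  for unsigned x.  */
  assert (x % 16 == (x & 15));
  return 0;
}
#endif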

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;
        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          abort ();
        }

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
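
  /* Editor's illustration (not part of the original file): the mask-and-or
     sequence above sign-extends a WIDTH-bit value held in a wider host
     integer.  A standalone sketch with `long long' standing in for
     HOST_WIDE_INT.  */
#if 0
#include <assert.h>

static long long
sign_extend_from (long long v, unsigned width)
{
  v &= ((long long) 1 << width) - 1;        /* zero-extended form */
  if (v & ((long long) 1 << (width - 1)))   /* sign bit of the narrow mode */
    v |= ((long long) -1) << width;         /* replicate it upward */
  return v;
}

int
main (void)
{
  /* 0xFF viewed as an 8-bit signed value is -1.  */
  assert (sign_extend_from (0xFF, 8) == -1);
  /* 0x7F stays positive.  */
  assert (sign_extend_from (0x7F, 8) == 127);
  return 0;
}
#endif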

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;
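
    /* Editor's illustration (not part of the original file): the
       shift-pair rotate used above.  For a left rotate by N of a
       WIDTH-bit value, the bits shifted out on the left re-enter on the
       right.  The sketch assumes an exactly 32-bit unsigned int and a
       count with 0 < N < WIDTH, matching the guarded path above.  */
#if 0
#include <assert.h>

static unsigned
rotl32 (unsigned x, unsigned n)         /* requires 0 < n < 32 */
{
  return (x << n) | (x >> (32 - n));
}

int
main (void)
{
  assert (rotl32 (0x80000001u, 1) == 0x00000003u);
  return 0;
}
#endif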

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
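
/* Editor's illustration (not part of the original file): the comparator
   above follows the standard qsort convention of returning a negative,
   zero, or positive value; subtracting precedences in d2 - d1 order
   sorts higher-precedence operands first.  The precedence field below is
   invented for the sketch.  */
#if 0
#include <assert.h>
#include <stdlib.h>

struct op_data { int precedence; };

static int
op_cmp (const void *p1, const void *p2)
{
  const struct op_data *d1 = p1;
  const struct op_data *d2 = p2;
  /* d2 - d1, not d1 - d2: descending order of precedence.  */
  return d2->precedence - d1->precedence;
}

int
main (void)
{
  struct op_data ops[3] = { { 1 }, { 3 }, { 2 } };
  qsort (ops, 3, sizeof (*ops), op_cmp);
  assert (ops[0].precedence == 3 && ops[2].precedence == 1);
  return 0;
}
#endif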
2009 static rtx
2010 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2011 rtx op1, int force)
2013 struct simplify_plus_minus_op_data ops[8];
2014 rtx result, tem;
2015 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2016 int first, negate, changed;
2017 int i, j;
2019 memset (ops, 0, sizeof ops);
2021 /* Set up the two operands and then expand them until nothing has been
2022 changed. If we run out of room in our array, give up; this should
2023 almost never happen. */
2025 ops[0].op = op0;
2026 ops[0].neg = 0;
2027 ops[1].op = op1;
2028 ops[1].neg = (code == MINUS);
2032 changed = 0;
2034 for (i = 0; i < n_ops; i++)
2036 rtx this_op = ops[i].op;
2037 int this_neg = ops[i].neg;
2038 enum rtx_code this_code = GET_CODE (this_op);
2040 switch (this_code)
2042 case PLUS:
2043 case MINUS:
2044 if (n_ops == 7)
2045 return NULL_RTX;
2047 ops[n_ops].op = XEXP (this_op, 1);
2048 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2049 n_ops++;
2051 ops[i].op = XEXP (this_op, 0);
2052 input_ops++;
2053 changed = 1;
2054 break;
2056 case NEG:
2057 ops[i].op = XEXP (this_op, 0);
2058 ops[i].neg = ! this_neg;
2059 changed = 1;
2060 break;
2062 case CONST:
2063 if (n_ops < 7
2064 && GET_CODE (XEXP (this_op, 0)) == PLUS
2065 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2066 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2068 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2069 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2070 ops[n_ops].neg = this_neg;
2071 n_ops++;
2072 input_consts++;
2073 changed = 1;
2075 break;
2077 case NOT:
2078 /* ~a -> (-a - 1) */
2079 if (n_ops != 7)
2081 ops[n_ops].op = constm1_rtx;
2082 ops[n_ops++].neg = this_neg;
2083 ops[i].op = XEXP (this_op, 0);
2084 ops[i].neg = !this_neg;
2085 changed = 1;
2087 break;
2089 case CONST_INT:
2090 if (this_neg)
2092 ops[i].op = neg_const_int (mode, this_op);
2093 ops[i].neg = 0;
2094 changed = 1;
2096 break;
2098 default:
2099 break;
2103 while (changed);
2105 /* If we only have two operands, we can't do anything. */
2106 if (n_ops <= 2 && !force)
2107 return NULL_RTX;
2109 /* Count the number of CONSTs we didn't split above. */
2110 for (i = 0; i < n_ops; i++)
2111 if (GET_CODE (ops[i].op) == CONST)
2112 input_consts++;
2114 /* Now simplify each pair of operands until nothing changes. The first
2115 time through just simplify constants against each other. */
2117 first = 1;
2120 changed = first;
2122 for (i = 0; i < n_ops - 1; i++)
2123 for (j = i + 1; j < n_ops; j++)
2125 rtx lhs = ops[i].op, rhs = ops[j].op;
2126 int lneg = ops[i].neg, rneg = ops[j].neg;
2128 if (lhs != 0 && rhs != 0
2129 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2131 enum rtx_code ncode = PLUS;
2133 if (lneg != rneg)
2135 ncode = MINUS;
2136 if (lneg)
2137 tem = lhs, lhs = rhs, rhs = tem;
2139 else if (swap_commutative_operands_p (lhs, rhs))
2140 tem = lhs, lhs = rhs, rhs = tem;
2142 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2144 /* Reject "simplifications" that just wrap the two
2145 arguments in a CONST. Failure to do so can result
2146 in infinite recursion with simplify_binary_operation
2147 when it calls us to simplify CONST operations. */
2148 if (tem
2149 && ! (GET_CODE (tem) == CONST
2150 && GET_CODE (XEXP (tem, 0)) == ncode
2151 && XEXP (XEXP (tem, 0), 0) == lhs
2152 && XEXP (XEXP (tem, 0), 1) == rhs)
2153 /* Don't allow -x + -1 -> ~x simplifications in the
2154 first pass. This allows us the chance to combine
2155 the -1 with other constants. */
2156 && ! (first
2157 && GET_CODE (tem) == NOT
2158 && XEXP (tem, 0) == rhs))
2160 lneg &= rneg;
2161 if (GET_CODE (tem) == NEG)
2162 tem = XEXP (tem, 0), lneg = !lneg;
2163 if (GET_CODE (tem) == CONST_INT && lneg)
2164 tem = neg_const_int (mode, tem), lneg = 0;
2166 ops[i].op = tem;
2167 ops[i].neg = lneg;
2168 ops[j].op = NULL_RTX;
2169 changed = 1;
2174 first = 0;
2176 while (changed);
2178 /* Pack all the operands to the lower-numbered entries. */
2179 for (i = 0, j = 0; j < n_ops; j++)
2180 if (ops[j].op)
2181 ops[i++] = ops[j];
2182 n_ops = i;
2184 /* Sort the operations by commutative_operand_precedence, the same
ordering that swap_commutative_operands_p is based on. */
2185 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2187 /* We suppressed creation of trivial CONST expressions in the
2188 combination loop to avoid recursion. Create one manually now.
2189 The combination loop should have ensured that there is exactly
2190 one CONST_INT, and the sort will have ensured that it is last
2191 in the array and that any other constant will be next-to-last. */
2193 if (n_ops > 1
2194 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2195 && CONSTANT_P (ops[n_ops - 2].op))
2197 rtx value = ops[n_ops - 1].op;
2198 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2199 value = neg_const_int (mode, value);
2200 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2201 n_ops--;
2204 /* Count the number of CONSTs that we generated. */
2205 n_consts = 0;
2206 for (i = 0; i < n_ops; i++)
2207 if (GET_CODE (ops[i].op) == CONST)
2208 n_consts++;
2210 /* Give up if we didn't reduce the number of operands we had. Make
2211 sure we count a CONST as two operands. If we have the same
2212 number of operands, but have made more CONSTs than before, this
2213 is also an improvement, so accept it. */
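/* Hand-worked case: for (plus (plus a b) c) the expansion loop leaves
   input_ops == n_ops == 3 with no CONSTs, so the second test below
   fires (same operand count, no new CONSTs) and we return NULL_RTX
   rather than rebuild an equivalent rtx.  */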
2214 if (!force
2215 && (n_ops + n_consts > input_ops
2216 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2217 return NULL_RTX;
2219 /* Put a non-negated operand first. If there aren't any, make all
2220 operands positive and negate the whole thing later. */
2222 negate = 0;
2223 for (i = 0; i < n_ops && ops[i].neg; i++)
2224 continue;
2225 if (i == n_ops)
2227 for (i = 0; i < n_ops; i++)
2228 ops[i].neg = 0;
2229 negate = 1;
2231 else if (i != 0)
2233 tem = ops[0].op;
2234 ops[0] = ops[i];
2235 ops[i].op = tem;
2236 ops[i].neg = 1;
2239 /* Now make the result by performing the requested operations. */
2240 result = ops[0].op;
2241 for (i = 1; i < n_ops; i++)
2242 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2243 mode, result, ops[i].op);
2245 return negate ? gen_rtx_NEG (mode, result) : result;
2248 /* Like simplify_binary_operation except used for relational operators.
2249 MODE is the mode of the operands, not that of the result. If MODE
2250 is VOIDmode, both operands must also be VOIDmode and we compare the
2251 operands in "infinite precision".
2253 If no simplification is possible, this function returns zero. Otherwise,
2254 it returns either const_true_rtx or const0_rtx. */
2256 rtx
2257 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2258 rtx op0, rtx op1)
2260 int equal, op0lt, op0ltu, op1lt, op1ltu;
2261 rtx tem;
2262 rtx trueop0;
2263 rtx trueop1;
2265 if (mode == VOIDmode
2266 && (GET_MODE (op0) != VOIDmode
2267 || GET_MODE (op1) != VOIDmode))
2268 abort ();
2270 /* If op0 is a compare, extract the comparison arguments from it. */
2271 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2272 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2274 trueop0 = avoid_constant_pool_reference (op0);
2275 trueop1 = avoid_constant_pool_reference (op1);
2277 /* We can't simplify MODE_CC values since we don't know what the
2278 actual comparison is. */
2279 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2280 return 0;
2282 /* Make sure the constant is second. */
2283 if (swap_commutative_operands_p (trueop0, trueop1))
2285 tem = op0, op0 = op1, op1 = tem;
2286 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2287 code = swap_condition (code);
2290 /* For integer comparisons of A and B maybe we can simplify A - B and can
2291 then simplify a comparison of that with zero. If A and B are both either
2292 a register or a CONST_INT, this can't help; testing for these cases will
2293 prevent infinite recursion here and speed things up.
2295 If CODE is an unsigned comparison, then we can never do this optimization,
2296 because it gives an incorrect result if the subtraction wraps around zero.
2297 ANSI C defines unsigned operations such that they never overflow, and
2298 thus such cases cannot be ignored. */
2300 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2301 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2302 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2303 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2304 && code != GTU && code != GEU && code != LTU && code != LEU)
2305 return simplify_relational_operation (signed_condition (code),
2306 mode, tem, const0_rtx);
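/* Compiled-out numeric sketch of the wraparound hazard above,
   assuming an 8-bit (QImode) width: 1 LTU 255 is true, yet
   (1 - 255) wraps to 2 and 2 LT 0 is false.  */
#if 0
  {
    unsigned char a = 1, b = 255;
    int ltu = a < b;                      /* 1: the right answer */
    int lt0 = (signed char) (a - b) < 0;  /* 0: the wrong answer  */
  }
#endif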
2308 if (flag_unsafe_math_optimizations && code == ORDERED)
2309 return const_true_rtx;
2311 if (flag_unsafe_math_optimizations && code == UNORDERED)
2312 return const0_rtx;
2314 /* For modes without NaNs, if the two operands are equal, we know the
2315 result except if they have side-effects. */
2316 if (! HONOR_NANS (GET_MODE (trueop0))
2317 && rtx_equal_p (trueop0, trueop1)
2318 && ! side_effects_p (trueop0))
2319 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2321 /* If the operands are floating-point constants, see if we can fold
2322 the result. */
2323 else if (GET_CODE (trueop0) == CONST_DOUBLE
2324 && GET_CODE (trueop1) == CONST_DOUBLE
2325 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2327 REAL_VALUE_TYPE d0, d1;
2329 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2330 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2332 /* Comparisons are unordered iff at least one of the values is NaN. */
2333 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2334 switch (code)
2336 case UNEQ:
2337 case UNLT:
2338 case UNGT:
2339 case UNLE:
2340 case UNGE:
2341 case NE:
2342 case UNORDERED:
2343 return const_true_rtx;
2344 case EQ:
2345 case LT:
2346 case GT:
2347 case LE:
2348 case GE:
2349 case LTGT:
2350 case ORDERED:
2351 return const0_rtx;
2352 default:
2353 return 0;
2356 equal = REAL_VALUES_EQUAL (d0, d1);
2357 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2358 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2361 /* Otherwise, see if the operands are both integers. */
2362 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2363 && (GET_CODE (trueop0) == CONST_DOUBLE
2364 || GET_CODE (trueop0) == CONST_INT)
2365 && (GET_CODE (trueop1) == CONST_DOUBLE
2366 || GET_CODE (trueop1) == CONST_INT))
2368 int width = GET_MODE_BITSIZE (mode);
2369 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2370 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2372 /* Get the two words comprising each integer constant. */
2373 if (GET_CODE (trueop0) == CONST_DOUBLE)
2375 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2376 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2378 else
2380 l0u = l0s = INTVAL (trueop0);
2381 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2384 if (GET_CODE (trueop1) == CONST_DOUBLE)
2386 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2387 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2389 else
2391 l1u = l1s = INTVAL (trueop1);
2392 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2395 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2396 we have to sign or zero-extend the values. */
2397 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2399 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2400 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2402 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2403 l0s |= ((HOST_WIDE_INT) (-1) << width);
2405 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2406 l1s |= ((HOST_WIDE_INT) (-1) << width);
2408 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2409 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2411 equal = (h0u == h1u && l0u == l1u);
2412 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2413 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2414 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2415 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
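/* E.g. (hand-worked): with (h0s, l0u) = (0, ~0) and (h1s, l1u) = (1, 0),
   the signed high words alone decide op0lt even though l0u > l1u;
   the unsigned low words only break high-word ties.  */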
2418 /* Otherwise, there are some code-specific tests we can make. */
2419 else
2421 switch (code)
2423 case EQ:
2424 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2425 return const0_rtx;
2426 break;
2428 case NE:
2429 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2430 return const_true_rtx;
2431 break;
2433 case GEU:
2434 /* Unsigned values are never negative. */
2435 if (trueop1 == const0_rtx)
2436 return const_true_rtx;
2437 break;
2439 case LTU:
2440 if (trueop1 == const0_rtx)
2441 return const0_rtx;
2442 break;
2444 case LEU:
2445 /* Unsigned values are never greater than the largest
2446 unsigned value. */
2447 if (GET_CODE (trueop1) == CONST_INT
2448 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2449 && INTEGRAL_MODE_P (mode))
2450 return const_true_rtx;
2451 break;
2453 case GTU:
2454 if (GET_CODE (trueop1) == CONST_INT
2455 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2456 && INTEGRAL_MODE_P (mode))
2457 return const0_rtx;
2458 break;
2460 case LT:
2461 /* Optimize abs(x) < 0.0. */
2462 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2464 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2465 : trueop0;
2466 if (GET_CODE (tem) == ABS)
2467 return const0_rtx;
2469 break;
2471 case GE:
2472 /* Optimize abs(x) >= 0.0. */
2473 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2475 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2476 : trueop0;
2477 if (GET_CODE (tem) == ABS)
2478 return const_true_rtx;
2480 break;
2482 case UNGE:
2483 /* Optimize ! (abs(x) < 0.0). */
2484 if (trueop1 == CONST0_RTX (mode))
2486 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2487 : trueop0;
2488 if (GET_CODE (tem) == ABS)
2489 return const_true_rtx;
2491 break;
2493 default:
2494 break;
2497 return 0;
2500 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2501 as appropriate. */
2502 switch (code)
2504 case EQ:
2505 case UNEQ:
2506 return equal ? const_true_rtx : const0_rtx;
2507 case NE:
2508 case LTGT:
2509 return ! equal ? const_true_rtx : const0_rtx;
2510 case LT:
2511 case UNLT:
2512 return op0lt ? const_true_rtx : const0_rtx;
2513 case GT:
2514 case UNGT:
2515 return op1lt ? const_true_rtx : const0_rtx;
2516 case LTU:
2517 return op0ltu ? const_true_rtx : const0_rtx;
2518 case GTU:
2519 return op1ltu ? const_true_rtx : const0_rtx;
2520 case LE:
2521 case UNLE:
2522 return equal || op0lt ? const_true_rtx : const0_rtx;
2523 case GE:
2524 case UNGE:
2525 return equal || op1lt ? const_true_rtx : const0_rtx;
2526 case LEU:
2527 return equal || op0ltu ? const_true_rtx : const0_rtx;
2528 case GEU:
2529 return equal || op1ltu ? const_true_rtx : const0_rtx;
2530 case ORDERED:
2531 return const_true_rtx;
2532 case UNORDERED:
2533 return const0_rtx;
2534 default:
2535 abort ();
2539 /* Simplify CODE, an operation with result mode MODE and three operands,
2540 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2541 a constant. Return 0 if no simplification is possible. */
2543 rtx
2544 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2545 enum machine_mode op0_mode, rtx op0, rtx op1,
2546 rtx op2)
2548 unsigned int width = GET_MODE_BITSIZE (mode);
2550 /* VOIDmode means "infinite" precision. */
2551 if (width == 0)
2552 width = HOST_BITS_PER_WIDE_INT;
2554 switch (code)
2556 case SIGN_EXTRACT:
2557 case ZERO_EXTRACT:
2558 if (GET_CODE (op0) == CONST_INT
2559 && GET_CODE (op1) == CONST_INT
2560 && GET_CODE (op2) == CONST_INT
2561 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2562 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2564 /* Extracting a bit-field from a constant. */
2565 HOST_WIDE_INT val = INTVAL (op0);
2567 if (BITS_BIG_ENDIAN)
2568 val >>= (GET_MODE_BITSIZE (op0_mode)
2569 - INTVAL (op2) - INTVAL (op1));
2570 else
2571 val >>= INTVAL (op2);
2573 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2575 /* First zero-extend. */
2576 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2577 /* If desired, propagate sign bit. */
2578 if (code == SIGN_EXTRACT
2579 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2580 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2583 /* Clear the bits that don't belong in our mode,
2584 unless they and our sign bit are all one.
2585 So we get either a reasonable negative value or a reasonable
2586 unsigned value for this mode. */
2587 if (width < HOST_BITS_PER_WIDE_INT
2588 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2589 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2590 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2592 return GEN_INT (val);
2594 break;
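/* Worked instance of the extraction above (made-up values,
   little-endian bit numbering): op0 = 0xB4, op1 = 4 bits starting at
   op2 = bit 2.  0xB4 >> 2 == 0x2D; masking with 0xF leaves 0xD; for
   SIGN_EXTRACT bit 3 is set, so the value sign-extends to -3.  */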
2596 case IF_THEN_ELSE:
2597 if (GET_CODE (op0) == CONST_INT)
2598 return op0 != const0_rtx ? op1 : op2;
2600 /* Convert a != b ? a : b and a == b ? b : a to "a". */
2601 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2602 && !HONOR_NANS (mode)
2603 && rtx_equal_p (XEXP (op0, 0), op1)
2604 && rtx_equal_p (XEXP (op0, 1), op2))
2605 return op1;
2606 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2607 && !HONOR_NANS (mode)
2608 && rtx_equal_p (XEXP (op0, 1), op1)
2609 && rtx_equal_p (XEXP (op0, 0), op2))
2610 return op2;
2611 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2613 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2614 ? GET_MODE (XEXP (op0, 1))
2615 : GET_MODE (XEXP (op0, 0)));
2616 rtx temp;
2617 if (cmp_mode == VOIDmode)
2618 cmp_mode = op0_mode;
2619 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2620 XEXP (op0, 0), XEXP (op0, 1));
2622 /* See if any simplifications were possible. */
2623 if (temp == const0_rtx)
2624 return op2;
2625 else if (temp == const1_rtx)
2626 return op1;
2627 else if (temp)
2628 op0 = temp;
2630 /* Look for happy constants in op1 and op2. */
2631 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2633 HOST_WIDE_INT t = INTVAL (op1);
2634 HOST_WIDE_INT f = INTVAL (op2);
2636 if (t == STORE_FLAG_VALUE && f == 0)
2637 code = GET_CODE (op0);
2638 else if (t == 0 && f == STORE_FLAG_VALUE)
2640 enum rtx_code tmp;
2641 tmp = reversed_comparison_code (op0, NULL_RTX);
2642 if (tmp == UNKNOWN)
2643 break;
2644 code = tmp;
2646 else
2647 break;
2649 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2652 break;
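/* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (lt x y) 1 0)
   collapses to (lt x y), while (if_then_else (lt x y) 0 1) becomes
   (ge x y) via reversed_comparison_code (a hand-worked example).  */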
2653 case VEC_MERGE:
2654 if (GET_MODE (op0) != mode
2655 || GET_MODE (op1) != mode
2656 || !VECTOR_MODE_P (mode))
2657 abort ();
2658 op2 = avoid_constant_pool_reference (op2);
2659 if (GET_CODE (op2) == CONST_INT)
2661 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2662 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2663 int mask = (1 << n_elts) - 1;
2665 if (!(INTVAL (op2) & mask))
2666 return op1;
2667 if ((INTVAL (op2) & mask) == mask)
2668 return op0;
2670 op0 = avoid_constant_pool_reference (op0);
2671 op1 = avoid_constant_pool_reference (op1);
2672 if (GET_CODE (op0) == CONST_VECTOR
2673 && GET_CODE (op1) == CONST_VECTOR)
2675 rtvec v = rtvec_alloc (n_elts);
2676 unsigned int i;
2678 for (i = 0; i < n_elts; i++)
2679 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2680 ? CONST_VECTOR_ELT (op0, i)
2681 : CONST_VECTOR_ELT (op1, i));
2682 return gen_rtx_CONST_VECTOR (mode, v);
2685 break;
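/* Hand-worked example: in a hypothetical 4-element vector mode, an
   op2 mask of 0b0101 takes elements 0 and 2 from op0 and elements 1
   and 3 from op1; masks of 0 and 0b1111 return op1 and op0 whole.  */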
2687 default:
2688 abort ();
2691 return 0;
2694 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
2695 Return 0 if no simplification is possible. */
2696 rtx
2697 simplify_subreg (enum machine_mode outermode, rtx op,
2698 enum machine_mode innermode, unsigned int byte)
2700 /* Little bit of sanity checking. */
2701 if (innermode == VOIDmode || outermode == VOIDmode
2702 || innermode == BLKmode || outermode == BLKmode)
2703 abort ();
2705 if (GET_MODE (op) != innermode
2706 && GET_MODE (op) != VOIDmode)
2707 abort ();
2709 if (byte % GET_MODE_SIZE (outermode)
2710 || byte >= GET_MODE_SIZE (innermode))
2711 abort ();
2713 if (outermode == innermode && !byte)
2714 return op;
2716 /* Simplify subregs of vector constants. */
2717 if (GET_CODE (op) == CONST_VECTOR)
2719 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2720 const unsigned int offset = byte / elt_size;
2721 rtx elt;
2723 if (GET_MODE_INNER (innermode) == outermode)
2725 elt = CONST_VECTOR_ELT (op, offset);
2727 /* ?? We probably don't need this copy_rtx because constants
2728 can be shared. ?? */
2730 return copy_rtx (elt);
2732 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2733 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2735 return (gen_rtx_CONST_VECTOR
2736 (outermode,
2737 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2738 &CONST_VECTOR_ELT (op, offset))));
2740 else if (GET_MODE_CLASS (outermode) == MODE_INT
2741 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2743 /* This happens when the target register size is smaller than
2744 the vector mode, and we synthesize operations with vectors
2745 of elements that are smaller than the register size. */
2746 HOST_WIDE_INT sum = 0, high = 0;
2747 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2748 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2749 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2750 int shift = BITS_PER_UNIT * elt_size;
2751 unsigned HOST_WIDE_INT unit_mask;
2753 unit_mask = (unsigned HOST_WIDE_INT) -1
2754 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2756 for (; n_elts--; i += step)
2758 elt = CONST_VECTOR_ELT (op, i);
2759 if (GET_CODE (elt) == CONST_DOUBLE
2760 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2762 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2763 elt);
2764 if (! elt)
2765 return NULL_RTX;
2767 if (GET_CODE (elt) != CONST_INT)
2768 return NULL_RTX;
2769 /* Avoid overflow. */
2770 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2771 return NULL_RTX;
2772 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2773 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2775 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2776 return GEN_INT (trunc_int_for_mode (sum, outermode));
2777 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2778 return immed_double_const (sum, high, outermode);
2779 else
2780 return NULL_RTX;
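/* Sketch of the packing loop above, hand-worked for two 16-bit
   elements entering one 32-bit integer on a little-endian target:
   the loop visits the highest-indexed element first, so the elements
   {0x1234, 0x5678} accumulate as (0x5678 << 16) | 0x1234
   == 0x56781234.  */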
2782 else if (GET_MODE_CLASS (outermode) == MODE_INT
2783 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2785 enum machine_mode new_mode
2786 = int_mode_for_mode (GET_MODE_INNER (innermode));
2787 int subbyte = byte % elt_size;
2789 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2790 if (! op)
2791 return NULL_RTX;
2792 return simplify_subreg (outermode, op, new_mode, subbyte);
2794 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2795 /* This shouldn't happen, but let's not do anything stupid. */
2796 return NULL_RTX;
2799 /* Attempt to simplify constant to non-SUBREG expression. */
2800 if (CONSTANT_P (op))
2802 int offset, part;
2803 unsigned HOST_WIDE_INT val = 0;
2805 if (VECTOR_MODE_P (outermode))
2807 /* Construct a CONST_VECTOR from individual subregs. */
2808 enum machine_mode submode = GET_MODE_INNER (outermode);
2809 int subsize = GET_MODE_UNIT_SIZE (outermode);
2810 int i, elts = GET_MODE_NUNITS (outermode);
2811 rtvec v = rtvec_alloc (elts);
2812 rtx elt;
2814 for (i = 0; i < elts; i++, byte += subsize)
2816 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2817 /* ??? It would be nice if we could actually make such subregs
2818 on targets that allow such relocations. */
2819 if (byte >= GET_MODE_SIZE (innermode))
2820 elt = CONST0_RTX (submode);
2821 else
2822 elt = simplify_subreg (submode, op, innermode, byte);
2823 if (! elt)
2824 return NULL_RTX;
2825 RTVEC_ELT (v, i) = elt;
2827 return gen_rtx_CONST_VECTOR (outermode, v);
2830 /* ??? This code is partly redundant with code below, but can handle
2831 the subregs of floats and similar corner cases.
2832 Later we should move all simplification code here and rewrite
2833 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2834 using SIMPLIFY_SUBREG. */
2835 if (subreg_lowpart_offset (outermode, innermode) == byte
2836 && GET_CODE (op) != CONST_VECTOR)
2838 rtx new = gen_lowpart_if_possible (outermode, op);
2839 if (new)
2840 return new;
2843 /* The comment above applies here as well. */
2844 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2845 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2846 && GET_MODE_CLASS (outermode) == MODE_INT)
2848 rtx new = constant_subword (op,
2849 (byte / UNITS_PER_WORD),
2850 innermode);
2851 if (new)
2852 return new;
2855 if (GET_MODE_CLASS (outermode) != MODE_INT
2856 && GET_MODE_CLASS (outermode) != MODE_CC)
2858 enum machine_mode new_mode = int_mode_for_mode (outermode);
2860 if (new_mode != innermode || byte != 0)
2862 op = simplify_subreg (new_mode, op, innermode, byte);
2863 if (! op)
2864 return NULL_RTX;
2865 return simplify_subreg (outermode, op, new_mode, 0);
2869 offset = byte * BITS_PER_UNIT;
2870 switch (GET_CODE (op))
2872 case CONST_DOUBLE:
2873 if (GET_MODE (op) != VOIDmode)
2874 break;
2876 /* We can't handle this case yet. */
2877 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2878 return NULL_RTX;
2880 part = offset >= HOST_BITS_PER_WIDE_INT;
2881 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2882 && BYTES_BIG_ENDIAN)
2883 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2884 && WORDS_BIG_ENDIAN))
2885 part = !part;
2886 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2887 offset %= HOST_BITS_PER_WIDE_INT;
2889 /* We've already picked the word we want from a double, so
2890 pretend this is actually an integer. */
2891 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2893 /* FALLTHROUGH */
2894 case CONST_INT:
2895 if (GET_CODE (op) == CONST_INT)
2896 val = INTVAL (op);
2898 /* We don't handle synthesizing non-integral constants yet. */
2899 if (GET_MODE_CLASS (outermode) != MODE_INT)
2900 return NULL_RTX;
2902 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2904 if (WORDS_BIG_ENDIAN)
2905 offset = (GET_MODE_BITSIZE (innermode)
2906 - GET_MODE_BITSIZE (outermode) - offset);
2907 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2908 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2909 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2910 - 2 * (offset % BITS_PER_WORD));
2913 if (offset >= HOST_BITS_PER_WIDE_INT)
2914 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2915 else
2917 val >>= offset;
2918 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2919 val = trunc_int_for_mode (val, outermode);
2920 return GEN_INT (val);
2922 default:
2923 break;
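/* Hand-worked case (little-endian assumed): the QImode subreg at
   byte 1 of (const_int 0x12345678) uses offset == 8 bits, so the
   shift and truncation above yield (const_int 0x56).  */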
2927 /* Changing mode twice with SUBREG => just change it once,
2928 or not at all if changing back to op's starting mode. */
2929 if (GET_CODE (op) == SUBREG)
2931 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2932 int final_offset = byte + SUBREG_BYTE (op);
2933 rtx new;
2935 if (outermode == innermostmode
2936 && byte == 0 && SUBREG_BYTE (op) == 0)
2937 return SUBREG_REG (op);
2939 /* The SUBREG_BYTE represents the offset, as if the value were stored
2940 in memory. An irritating exception is the paradoxical subreg, where
2941 we define SUBREG_BYTE to be 0; on big-endian machines this
2942 value should be negative. For a moment, undo this exception. */
2943 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2945 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2946 if (WORDS_BIG_ENDIAN)
2947 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2948 if (BYTES_BIG_ENDIAN)
2949 final_offset += difference % UNITS_PER_WORD;
2951 if (SUBREG_BYTE (op) == 0
2952 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2954 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2955 if (WORDS_BIG_ENDIAN)
2956 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2957 if (BYTES_BIG_ENDIAN)
2958 final_offset += difference % UNITS_PER_WORD;
2961 /* See whether resulting subreg will be paradoxical. */
2962 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2964 /* In nonparadoxical subregs we can't handle negative offsets. */
2965 if (final_offset < 0)
2966 return NULL_RTX;
2967 /* Bail out in case resulting subreg would be incorrect. */
2968 if (final_offset % GET_MODE_SIZE (outermode)
2969 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2970 return NULL_RTX;
2972 else
2974 int offset = 0;
2975 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2977 /* In a paradoxical subreg, see if we are still looking at the lower part.
2978 If so, our SUBREG_BYTE will be 0. */
2979 if (WORDS_BIG_ENDIAN)
2980 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2981 if (BYTES_BIG_ENDIAN)
2982 offset += difference % UNITS_PER_WORD;
2983 if (offset == final_offset)
2984 final_offset = 0;
2985 else
2986 return NULL_RTX;
2989 /* Recurse for further possible simplifications. */
2990 new = simplify_subreg (outermode, SUBREG_REG (op),
2991 GET_MODE (SUBREG_REG (op)),
2992 final_offset);
2993 if (new)
2994 return new;
2995 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
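/* For instance (hand-worked, little-endian, x a pseudo register):
   (subreg:QI (subreg:HI (reg:SI x) 0) 0) simplifies here to
   (subreg:QI (reg:SI x) 0) -- one mode change instead of two.  */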
2998 /* SUBREG of a hard register => just change the register number
2999 and/or mode. If the hard register is not valid in that mode,
3000 suppress this simplification. If the hard register is the stack,
3001 frame, or argument pointer, leave this as a SUBREG. */
3003 if (REG_P (op)
3004 && (! REG_FUNCTION_VALUE_P (op)
3005 || ! rtx_equal_function_value_matters)
3006 && REGNO (op) < FIRST_PSEUDO_REGISTER
3007 #ifdef CANNOT_CHANGE_MODE_CLASS
3008 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3009 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3010 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3011 #endif
3012 && ((reload_completed && !frame_pointer_needed)
3013 || (REGNO (op) != FRAME_POINTER_REGNUM
3014 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3015 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3016 #endif
3018 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3019 && REGNO (op) != ARG_POINTER_REGNUM
3020 #endif
3021 && REGNO (op) != STACK_POINTER_REGNUM)
3023 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
3024 1);
3026 /* ??? We do allow it if the current REG is not valid for
3027 its mode. This is a kludge to work around how float/complex
3028 arguments are passed on 32-bit SPARC and should be fixed. */
3029 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3030 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3032 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3034 /* Propagate original regno. We don't have any way to specify
3035 the offset inside original regno, so do so only for lowpart.
3036 The information is used only by alias analysis, which cannot
3037 grok partial registers anyway. */
3039 if (subreg_lowpart_offset (outermode, innermode) == byte)
3040 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3041 return x;
3045 /* If we have a SUBREG of a register that we are replacing and we are
3046 replacing it with a MEM, make a new MEM and try replacing the
3047 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3048 or if we would be widening it. */
3050 if (GET_CODE (op) == MEM
3051 && ! mode_dependent_address_p (XEXP (op, 0))
3052 /* Allow splitting of volatile memory references in case we don't
3053 have an instruction to move the whole thing. */
3054 && (! MEM_VOLATILE_P (op)
3055 || ! have_insn_for (SET, innermode))
3056 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3057 return adjust_address_nv (op, outermode, byte);
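/* E.g. (hand-worked): (subreg:SI (mem:DI (reg A)) 4) can become
   (mem:SI (plus (reg A) (const_int 4))), provided the address is not
   mode-dependent and the MEM is not being widened.  */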
3059 /* Handle complex values represented as CONCAT
3060 of the real and imaginary parts. */
3061 if (GET_CODE (op) == CONCAT)
3063 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
3064 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3065 unsigned int final_offset;
3066 rtx res;
3068 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3069 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3070 if (res)
3071 return res;
3072 /* We can at least simplify it by referring directly to the relevant part. */
3073 return gen_rtx_SUBREG (outermode, part, final_offset);
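/* E.g. (hand-worked): for (subreg:SF (concat:SC re im) 4), byte 4
   selects the imaginary half, so the result is simply "im".  */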
3076 return NULL_RTX;
3078 /* Make a SUBREG operation or equivalent if it folds. */
3080 rtx
3081 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3082 enum machine_mode innermode, unsigned int byte)
3084 rtx new;
3085 /* Little bit of sanity checking. */
3086 if (innermode == VOIDmode || outermode == VOIDmode
3087 || innermode == BLKmode || outermode == BLKmode)
3088 abort ();
3090 if (GET_MODE (op) != innermode
3091 && GET_MODE (op) != VOIDmode)
3092 abort ();
3094 if (byte % GET_MODE_SIZE (outermode)
3095 || byte >= GET_MODE_SIZE (innermode))
3096 abort ();
3098 if (GET_CODE (op) == QUEUED)
3099 return NULL_RTX;
3101 new = simplify_subreg (outermode, op, innermode, byte);
3102 if (new)
3103 return new;
3105 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3106 return NULL_RTX;
3108 return gen_rtx_SUBREG (outermode, op, byte);
3110 /* Simplify X, an rtx expression.
3112 Return the simplified expression or NULL if no simplifications
3113 were possible.
3115 This is the preferred entry point into the simplification routines;
3116 however, we still allow passes to call the more specific routines.
3118 Right now GCC has three (yes, three) major bodies of RTL simplification
3119 code that need to be unified.
3121 1. fold_rtx in cse.c. This code uses various CSE specific
3122 information to aid in RTL simplification.
3124 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3125 it uses combine specific information to aid in RTL
3126 simplification.
3128 3. The routines in this file.
3131 Long term we want to only have one body of simplification code; to
3132 get to that state I recommend the following steps:
3134 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3135 which do not depend on pass-specific state into these routines.
3137 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3138 use this routine whenever possible.
3140 3. Allow for pass dependent state to be provided to these
3141 routines and add simplifications based on the pass dependent
3142 state. Remove code from cse.c & combine.c that becomes
3143 redundant/dead.
3145 It will take time, but ultimately the compiler will be easier to
3146 maintain and improve. It's totally silly that when we add a
3147 simplification it needs to be added in 4 places (3 for RTL
3148 simplification and 1 for tree simplification). */
3150 rtx
3151 simplify_rtx (rtx x)
3153 enum rtx_code code = GET_CODE (x);
3154 enum machine_mode mode = GET_MODE (x);
3155 rtx temp;
3157 switch (GET_RTX_CLASS (code))
3159 case '1':
3160 return simplify_unary_operation (code, mode,
3161 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3162 case 'c':
3163 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3164 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3166 /* Fall through.... */
3168 case '2':
3169 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3171 case '3':
3172 case 'b':
3173 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3174 XEXP (x, 0), XEXP (x, 1),
3175 XEXP (x, 2));
3177 case '<':
3178 temp = simplify_relational_operation (code,
3179 ((GET_MODE (XEXP (x, 0))
3180 != VOIDmode)
3181 ? GET_MODE (XEXP (x, 0))
3182 : GET_MODE (XEXP (x, 1))),
3183 XEXP (x, 0), XEXP (x, 1));
3184 #ifdef FLOAT_STORE_FLAG_VALUE
3185 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3187 if (temp == const0_rtx)
3188 temp = CONST0_RTX (mode);
3189 else
3190 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3191 mode);
3193 #endif
3194 return temp;
3196 case 'x':
3197 if (code == SUBREG)
3198 return simplify_gen_subreg (mode, SUBREG_REG (x),
3199 GET_MODE (SUBREG_REG (x)),
3200 SUBREG_BYTE (x));
3201 if (code == CONSTANT_P_RTX)
3203 if (CONSTANT_P (XEXP (x, 0)))
3204 return const1_rtx;
3206 break;
3208 case 'o':
3209 if (code == LO_SUM)
3211 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3212 if (GET_CODE (XEXP (x, 0)) == HIGH
3213 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3214 return XEXP (x, 1);
3216 break;
3218 default:
3219 break;
3221 return NULL;