[official-gcc.git] / gcc / simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
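/* As an illustrative sketch: on a host with a 64-bit HOST_WIDE_INT,
a (low, high) pair represents a 128-bit value, and this macro
supplies the high half for a value that fits in the low half:

HWI_SIGN_EXTEND (-5) == (HOST_WIDE_INT) -1 (all sign bits)
HWI_SIGN_EXTEND (5) == (HOST_WIDE_INT) 0

so (-5, HWI_SIGN_EXTEND (-5)) is the double-width form of -5. */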
52 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
53 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
54 const void *));
55 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
56 enum machine_mode, rtx,
57 rtx, int));
59 /* Negate a CONST_INT rtx, truncating (because a conversion from a
60 maximally negative number can overflow). */
61 static rtx
62 neg_const_int (mode, i)
63 enum machine_mode mode;
64 rtx i;
66 return gen_int_mode (- INTVAL (i), mode);
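/* Worked example of the truncation: in QImode the most negative
value is -128, and gen_int_mode wraps the negation back into the
mode, so

neg_const_int (QImode, GEN_INT (-128)) ==> (const_int -128)

i.e. -(-128) wraps modulo 256 instead of overflowing. */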
70 /* Make a binary operation by properly ordering the operands and
71 seeing if the expression folds. */
73 rtx
74 simplify_gen_binary (code, mode, op0, op1)
75 enum rtx_code code;
76 enum machine_mode mode;
77 rtx op0, op1;
79 rtx tem;
81 /* Put complex operands first and constants second if commutative. */
82 if (GET_RTX_CLASS (code) == 'c'
83 && swap_commutative_operands_p (op0, op1))
84 tem = op0, op0 = op1, op1 = tem;
86 /* If this simplifies, do it. */
87 tem = simplify_binary_operation (code, mode, op0, op1);
88 if (tem)
89 return tem;
91 /* Handle addition and subtraction specially. Otherwise, just form
92 the operation. */
94 if (code == PLUS || code == MINUS)
96 tem = simplify_plus_minus (code, mode, op0, op1, 1);
97 if (tem)
98 return tem;
101 return gen_rtx_fmt_ee (code, mode, op0, op1);
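/* Usage sketch (hypothetical operands): given a register REG and
constant operands, a caller sees either a canonically ordered
expression or a folded constant, e.g.

simplify_gen_binary (PLUS, SImode, GEN_INT (2), reg)
==> (plus:SI (reg:SI) (const_int 2))
simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
==> (const_int 5) */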
104 /* If X is a MEM referencing the constant pool, return the real value.
105 Otherwise return X. */
106 rtx
107 avoid_constant_pool_reference (x)
108 rtx x;
110 rtx c, tmp, addr;
111 enum machine_mode cmode;
113 switch (GET_CODE (x))
115 case MEM:
116 break;
118 case FLOAT_EXTEND:
119 /* Handle float extensions of constant pool references. */
120 tmp = XEXP (x, 0);
121 c = avoid_constant_pool_reference (tmp);
122 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
124 REAL_VALUE_TYPE d;
126 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
127 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
129 return x;
131 default:
132 return x;
135 addr = XEXP (x, 0);
137 /* Call target hook to avoid the effects of -fpic etc... */
138 addr = (*targetm.delegitimize_address) (addr);
140 if (GET_CODE (addr) == LO_SUM)
141 addr = XEXP (addr, 1);
143 if (GET_CODE (addr) != SYMBOL_REF
144 || ! CONSTANT_POOL_ADDRESS_P (addr))
145 return x;
147 c = get_pool_constant (addr);
148 cmode = get_pool_mode (addr);
150 /* If we're accessing the constant in a different mode than it was
151 originally stored, attempt to fix that up via subreg simplifications.
152 If that fails we have no choice but to return the original memory. */
153 if (cmode != GET_MODE (x))
155 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
156 return c ? c : x;
159 return c;
162 /* Make a unary operation by first seeing if it folds and otherwise making
163 the specified operation. */
165 rtx
166 simplify_gen_unary (code, mode, op, op_mode)
167 enum rtx_code code;
168 enum machine_mode mode;
169 rtx op;
170 enum machine_mode op_mode;
172 rtx tem;
174 /* If this simplifies, use it. */
175 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
176 return tem;
178 return gen_rtx_fmt_e (code, mode, op);
181 /* Likewise for ternary operations. */
183 rtx
184 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
185 enum rtx_code code;
186 enum machine_mode mode, op0_mode;
187 rtx op0, op1, op2;
189 rtx tem;
191 /* If this simplifies, use it. */
192 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
193 op0, op1, op2)))
194 return tem;
196 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
199 /* Likewise, for relational operations.
200 CMP_MODE specifies mode comparison is done in. */
203 rtx
204 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
205 enum rtx_code code;
206 enum machine_mode mode;
207 enum machine_mode cmp_mode;
208 rtx op0, op1;
210 rtx tem;
212 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
213 return tem;
215 /* For the following tests, ensure const0_rtx is op1. */
216 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
217 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
219 /* If op0 is a compare, extract the comparison arguments from it. */
220 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
221 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
223 /* If op0 is a comparison, extract the comparison arguments from it. */
224 if (code == NE && op1 == const0_rtx
225 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
226 return op0;
227 else if (code == EQ && op1 == const0_rtx)
229 /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'. */
230 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
231 if (new != UNKNOWN)
233 code = new;
234 mode = cmp_mode;
235 op1 = XEXP (op0, 1);
236 op0 = XEXP (op0, 0);
240 /* Put complex operands first and constants second. */
241 if (swap_commutative_operands_p (op0, op1))
242 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
244 return gen_rtx_fmt_ee (code, mode, op0, op1);
247 /* Replace all occurrences of OLD in X with NEW and try to simplify the
248 resulting RTX. Return a new RTX which is as simplified as possible. */
250 rtx
251 simplify_replace_rtx (x, old, new)
252 rtx x;
253 rtx old;
254 rtx new;
256 enum rtx_code code = GET_CODE (x);
257 enum machine_mode mode = GET_MODE (x);
259 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
260 to build a new expression substituting recursively. If we can't do
261 anything, return our input. */
263 if (x == old)
264 return new;
266 switch (GET_RTX_CLASS (code))
268 case '1':
270 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
271 rtx op = (XEXP (x, 0) == old
272 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
274 return simplify_gen_unary (code, mode, op, op_mode);
277 case '2':
278 case 'c':
279 return
280 simplify_gen_binary (code, mode,
281 simplify_replace_rtx (XEXP (x, 0), old, new),
282 simplify_replace_rtx (XEXP (x, 1), old, new));
283 case '<':
285 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
286 ? GET_MODE (XEXP (x, 0))
287 : GET_MODE (XEXP (x, 1)));
288 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
289 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
291 return
292 simplify_gen_relational (code, mode,
293 (op_mode != VOIDmode
294 ? op_mode
295 : GET_MODE (op0) != VOIDmode
296 ? GET_MODE (op0)
297 : GET_MODE (op1)),
298 op0, op1);
301 case '3':
302 case 'b':
304 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
305 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
307 return
308 simplify_gen_ternary (code, mode,
309 (op_mode != VOIDmode
310 ? op_mode
311 : GET_MODE (op0)),
312 op0,
313 simplify_replace_rtx (XEXP (x, 1), old, new),
314 simplify_replace_rtx (XEXP (x, 2), old, new));
317 case 'x':
318 /* The only case we try to handle is a SUBREG. */
319 if (code == SUBREG)
321 rtx exp;
322 exp = simplify_gen_subreg (GET_MODE (x),
323 simplify_replace_rtx (SUBREG_REG (x),
324 old, new),
325 GET_MODE (SUBREG_REG (x)),
326 SUBREG_BYTE (x));
327 if (exp)
328 x = exp;
330 return x;
332 case 'o':
333 if (code == MEM)
334 return replace_equiv_address_nv (x,
335 simplify_replace_rtx (XEXP (x, 0),
336 old, new));
337 else if (code == LO_SUM)
339 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
340 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
342 /* (lo_sum (high x) x) -> x */
343 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
344 return op1;
346 return gen_rtx_LO_SUM (mode, op0, op1);
348 else if (code == REG)
350 if (REG_P (old) && REGNO (x) == REGNO (old))
351 return new;
354 return x;
356 default:
357 return x;
359 return x;
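/* Usage sketch (hypothetical rtl): substituting a known constant for
a register lets the result fold through the cases above, e.g. with
X = (plus:SI (reg:SI 60) (const_int 1)),

simplify_replace_rtx (x, reg60, GEN_INT (3)) ==> (const_int 4)

since the binary ('2'/'c') case rebuilds the PLUS through
simplify_gen_binary, which folds 3 + 1. */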
362 /* Try to simplify a unary operation CODE whose output mode is to be
363 MODE with input operand OP whose mode was originally OP_MODE.
364 Return zero if no simplification can be made. */
365 rtx
366 simplify_unary_operation (code, mode, op, op_mode)
367 enum rtx_code code;
368 enum machine_mode mode;
369 rtx op;
370 enum machine_mode op_mode;
372 unsigned int width = GET_MODE_BITSIZE (mode);
373 rtx trueop = avoid_constant_pool_reference (op);
375 if (code == VEC_DUPLICATE)
377 if (!VECTOR_MODE_P (mode))
378 abort ();
379 if (GET_MODE (trueop) != VOIDmode
380 && !VECTOR_MODE_P (GET_MODE (trueop))
381 && GET_MODE_INNER (mode) != GET_MODE (trueop))
382 abort ();
383 if (GET_MODE (trueop) != VOIDmode
384 && VECTOR_MODE_P (GET_MODE (trueop))
385 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
386 abort ();
387 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
388 || GET_CODE (trueop) == CONST_VECTOR)
390 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
391 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
392 rtvec v = rtvec_alloc (n_elts);
393 unsigned int i;
395 if (GET_CODE (trueop) != CONST_VECTOR)
396 for (i = 0; i < n_elts; i++)
397 RTVEC_ELT (v, i) = trueop;
398 else
400 enum machine_mode inmode = GET_MODE (trueop);
401 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
402 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
404 if (in_n_elts >= n_elts || n_elts % in_n_elts)
405 abort ();
406 for (i = 0; i < n_elts; i++)
407 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
409 return gen_rtx_CONST_VECTOR (mode, v);
413 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
415 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
416 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
417 enum machine_mode opmode = GET_MODE (trueop);
418 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
419 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
420 rtvec v = rtvec_alloc (n_elts);
421 unsigned int i;
423 if (op_n_elts != n_elts)
424 abort ();
426 for (i = 0; i < n_elts; i++)
428 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
429 CONST_VECTOR_ELT (trueop, i),
430 GET_MODE_INNER (opmode));
431 if (!x)
432 return 0;
433 RTVEC_ELT (v, i) = x;
435 return gen_rtx_CONST_VECTOR (mode, v);
438 /* The order of these tests is critical so that, for example, we don't
439 check the wrong mode (input vs. output) for a conversion operation,
440 such as FIX. At some point, this should be simplified. */
442 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
443 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
445 HOST_WIDE_INT hv, lv;
446 REAL_VALUE_TYPE d;
448 if (GET_CODE (trueop) == CONST_INT)
449 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
450 else
451 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
453 REAL_VALUE_FROM_INT (d, lv, hv, mode);
454 d = real_value_truncate (mode, d);
455 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
457 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
458 && (GET_CODE (trueop) == CONST_DOUBLE
459 || GET_CODE (trueop) == CONST_INT))
461 HOST_WIDE_INT hv, lv;
462 REAL_VALUE_TYPE d;
464 if (GET_CODE (trueop) == CONST_INT)
465 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
466 else
467 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
469 if (op_mode == VOIDmode)
471 /* We don't know how to interpret negative-looking numbers in
472 this case, so don't try to fold those. */
473 if (hv < 0)
474 return 0;
476 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
477 ;
478 else
479 hv = 0, lv &= GET_MODE_MASK (op_mode);
481 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
482 d = real_value_truncate (mode, d);
483 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
486 if (GET_CODE (trueop) == CONST_INT
487 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
489 HOST_WIDE_INT arg0 = INTVAL (trueop);
490 HOST_WIDE_INT val;
492 switch (code)
494 case NOT:
495 val = ~ arg0;
496 break;
498 case NEG:
499 val = - arg0;
500 break;
502 case ABS:
503 val = (arg0 >= 0 ? arg0 : - arg0);
504 break;
506 case FFS:
507 /* Don't use ffs here. Instead, get low order bit and then its
508 number. If arg0 is zero, this will return 0, as desired. */
509 arg0 &= GET_MODE_MASK (mode);
510 val = exact_log2 (arg0 & (- arg0)) + 1;
511 break;
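/* Worked example: for arg0 = 0x18 (binary 11000), arg0 & -arg0
isolates the lowest set bit, 0x8, and exact_log2 (0x8) + 1 == 4,
the 1-based bit number FFS must return. For arg0 == 0 the AND
yields 0, exact_log2 returns -1, and the result is 0. */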
513 case CLZ:
514 arg0 &= GET_MODE_MASK (mode);
515 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
516 ;
517 else
518 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
519 break;
521 case CTZ:
522 arg0 &= GET_MODE_MASK (mode);
523 if (arg0 == 0)
525 /* Even if the value at zero is undefined, we have to come
526 up with some replacement. Seems good enough. */
527 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
528 val = GET_MODE_BITSIZE (mode);
530 else
531 val = exact_log2 (arg0 & -arg0);
532 break;
534 case POPCOUNT:
535 arg0 &= GET_MODE_MASK (mode);
536 val = 0;
537 while (arg0)
538 val++, arg0 &= arg0 - 1;
539 break;
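/* Worked example: the loop relies on the classic x &= x - 1 step,
which clears the lowest set bit per iteration; for arg0 = 0xb
(binary 1011) it runs 1011 -> 1010 -> 1000 -> 0000, giving
val == 3. PARITY below is the same count reduced mod 2. */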
541 case PARITY:
542 arg0 &= GET_MODE_MASK (mode);
543 val = 0;
544 while (arg0)
545 val++, arg0 &= arg0 - 1;
546 val &= 1;
547 break;
549 case TRUNCATE:
550 val = arg0;
551 break;
553 case ZERO_EXTEND:
554 /* When zero-extending a CONST_INT, we need to know its
555 original mode. */
556 if (op_mode == VOIDmode)
557 abort ();
558 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
560 /* If we were really extending the mode,
561 we would have to distinguish between zero-extension
562 and sign-extension. */
563 if (width != GET_MODE_BITSIZE (op_mode))
564 abort ();
565 val = arg0;
567 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
568 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
569 else
570 return 0;
571 break;
573 case SIGN_EXTEND:
574 if (op_mode == VOIDmode)
575 op_mode = mode;
576 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
578 /* If we were really extending the mode,
579 we would have to distinguish between zero-extension
580 and sign-extension. */
581 if (width != GET_MODE_BITSIZE (op_mode))
582 abort ();
583 val = arg0;
585 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
587 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
589 if (val
590 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
591 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
593 else
594 return 0;
595 break;
597 case SQRT:
598 case FLOAT_EXTEND:
599 case FLOAT_TRUNCATE:
600 case SS_TRUNCATE:
601 case US_TRUNCATE:
602 return 0;
604 default:
605 abort ();
608 val = trunc_int_for_mode (val, mode);
610 return GEN_INT (val);
613 /* We can do some operations on integer CONST_DOUBLEs. Also allow
614 for a DImode operation on a CONST_INT. */
615 else if (GET_MODE (trueop) == VOIDmode
616 && width <= HOST_BITS_PER_WIDE_INT * 2
617 && (GET_CODE (trueop) == CONST_DOUBLE
618 || GET_CODE (trueop) == CONST_INT))
620 unsigned HOST_WIDE_INT l1, lv;
621 HOST_WIDE_INT h1, hv;
623 if (GET_CODE (trueop) == CONST_DOUBLE)
624 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
625 else
626 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
628 switch (code)
630 case NOT:
631 lv = ~ l1;
632 hv = ~ h1;
633 break;
635 case NEG:
636 neg_double (l1, h1, &lv, &hv);
637 break;
639 case ABS:
640 if (h1 < 0)
641 neg_double (l1, h1, &lv, &hv);
642 else
643 lv = l1, hv = h1;
644 break;
646 case FFS:
647 hv = 0;
648 if (l1 == 0)
650 if (h1 == 0)
651 lv = 0;
652 else
653 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
655 else
656 lv = exact_log2 (l1 & -l1) + 1;
657 break;
659 case CLZ:
660 hv = 0;
661 if (h1 == 0)
662 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
663 else
664 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
665 - HOST_BITS_PER_WIDE_INT;
666 break;
668 case CTZ:
669 hv = 0;
670 if (l1 == 0)
672 if (h1 == 0)
673 lv = GET_MODE_BITSIZE (mode);
674 else
675 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
677 else
678 lv = exact_log2 (l1 & -l1);
679 break;
681 case POPCOUNT:
682 hv = 0;
683 lv = 0;
684 while (l1)
685 lv++, l1 &= l1 - 1;
686 while (h1)
687 lv++, h1 &= h1 - 1;
688 break;
690 case PARITY:
691 hv = 0;
692 lv = 0;
693 while (l1)
694 lv++, l1 &= l1 - 1;
695 while (h1)
696 lv++, h1 &= h1 - 1;
697 lv &= 1;
698 break;
700 case TRUNCATE:
701 /* This is just a change-of-mode, so do nothing. */
702 lv = l1, hv = h1;
703 break;
705 case ZERO_EXTEND:
706 if (op_mode == VOIDmode)
707 abort ();
709 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
710 return 0;
712 hv = 0;
713 lv = l1 & GET_MODE_MASK (op_mode);
714 break;
716 case SIGN_EXTEND:
717 if (op_mode == VOIDmode
718 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
719 return 0;
720 else
722 lv = l1 & GET_MODE_MASK (op_mode);
723 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
724 && (lv & ((HOST_WIDE_INT) 1
725 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
726 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
728 hv = HWI_SIGN_EXTEND (lv);
730 break;
732 case SQRT:
733 return 0;
735 default:
736 return 0;
739 return immed_double_const (lv, hv, mode);
742 else if (GET_CODE (trueop) == CONST_DOUBLE
743 && GET_MODE_CLASS (mode) == MODE_FLOAT)
745 REAL_VALUE_TYPE d, t;
746 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
748 switch (code)
750 case SQRT:
751 if (HONOR_SNANS (mode) && real_isnan (&d))
752 return 0;
753 real_sqrt (&t, mode, &d);
754 d = t;
755 break;
756 case ABS:
757 d = REAL_VALUE_ABS (d);
758 break;
759 case NEG:
760 d = REAL_VALUE_NEGATE (d);
761 break;
762 case FLOAT_TRUNCATE:
763 d = real_value_truncate (mode, d);
764 break;
765 case FLOAT_EXTEND:
766 /* All this does is change the mode. */
767 break;
768 case FIX:
769 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
770 break;
772 default:
773 abort ();
775 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
778 else if (GET_CODE (trueop) == CONST_DOUBLE
779 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
780 && GET_MODE_CLASS (mode) == MODE_INT
781 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
783 HOST_WIDE_INT i;
784 REAL_VALUE_TYPE d;
785 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
786 switch (code)
788 case FIX: i = REAL_VALUE_FIX (d); break;
789 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
790 default:
791 abort ();
793 return gen_int_mode (i, mode);
796 /* This was formerly used only for non-IEEE float.
797 eggert@twinsun.com says it is safe for IEEE also. */
798 else
800 enum rtx_code reversed;
801 /* There are some simplifications we can do even if the operands
802 aren't constant. */
803 switch (code)
805 case NOT:
806 /* (not (not X)) == X. */
807 if (GET_CODE (op) == NOT)
808 return XEXP (op, 0);
810 /* (not (eq X Y)) == (ne X Y), etc. */
811 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
812 && ((reversed = reversed_comparison_code (op, NULL_RTX))
813 != UNKNOWN))
814 return gen_rtx_fmt_ee (reversed,
815 op_mode, XEXP (op, 0), XEXP (op, 1));
816 break;
818 case NEG:
819 /* (neg (neg X)) == X. */
820 if (GET_CODE (op) == NEG)
821 return XEXP (op, 0);
822 break;
824 case SIGN_EXTEND:
825 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
826 becomes just the MINUS if its mode is MODE. This allows
827 folding switch statements on machines using casesi (such as
828 the VAX). */
829 if (GET_CODE (op) == TRUNCATE
830 && GET_MODE (XEXP (op, 0)) == mode
831 && GET_CODE (XEXP (op, 0)) == MINUS
832 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
833 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
834 return XEXP (op, 0);
836 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
837 if (! POINTERS_EXTEND_UNSIGNED
838 && mode == Pmode && GET_MODE (op) == ptr_mode
839 && (CONSTANT_P (op)
840 || (GET_CODE (op) == SUBREG
841 && GET_CODE (SUBREG_REG (op)) == REG
842 && REG_POINTER (SUBREG_REG (op))
843 && GET_MODE (SUBREG_REG (op)) == Pmode)))
844 return convert_memory_address (Pmode, op);
845 #endif
846 break;
848 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
849 case ZERO_EXTEND:
850 if (POINTERS_EXTEND_UNSIGNED > 0
851 && mode == Pmode && GET_MODE (op) == ptr_mode
852 && (CONSTANT_P (op)
853 || (GET_CODE (op) == SUBREG
854 && GET_CODE (SUBREG_REG (op)) == REG
855 && REG_POINTER (SUBREG_REG (op))
856 && GET_MODE (SUBREG_REG (op)) == Pmode)))
857 return convert_memory_address (Pmode, op);
858 break;
859 #endif
861 default:
862 break;
865 return 0;
869 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
870 and OP1. Return 0 if no simplification is possible.
872 Don't use this for relational operations such as EQ or LT.
873 Use simplify_relational_operation instead. */
874 rtx
875 simplify_binary_operation (code, mode, op0, op1)
876 enum rtx_code code;
877 enum machine_mode mode;
878 rtx op0, op1;
880 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
881 HOST_WIDE_INT val;
882 unsigned int width = GET_MODE_BITSIZE (mode);
883 rtx tem;
884 rtx trueop0 = avoid_constant_pool_reference (op0);
885 rtx trueop1 = avoid_constant_pool_reference (op1);
887 /* Relational operations don't work here. We must know the mode
888 of the operands in order to do the comparison correctly.
889 Assuming a full word can give incorrect results.
890 Consider comparing 128 with -128 in QImode. */
892 if (GET_RTX_CLASS (code) == '<')
893 abort ();
895 /* Make sure the constant is second. */
896 if (GET_RTX_CLASS (code) == 'c'
897 && swap_commutative_operands_p (trueop0, trueop1))
899 tem = op0, op0 = op1, op1 = tem;
900 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
903 if (VECTOR_MODE_P (mode)
904 && GET_CODE (trueop0) == CONST_VECTOR
905 && GET_CODE (trueop1) == CONST_VECTOR)
907 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
908 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
909 enum machine_mode op0mode = GET_MODE (trueop0);
910 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
911 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
912 enum machine_mode op1mode = GET_MODE (trueop1);
913 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
914 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
915 rtvec v = rtvec_alloc (n_elts);
916 unsigned int i;
918 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
919 abort ();
921 for (i = 0; i < n_elts; i++)
923 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
924 CONST_VECTOR_ELT (trueop0, i),
925 CONST_VECTOR_ELT (trueop1, i));
926 if (!x)
927 return 0;
928 RTVEC_ELT (v, i) = x;
931 return gen_rtx_CONST_VECTOR (mode, v);
934 if (GET_MODE_CLASS (mode) == MODE_FLOAT
935 && GET_CODE (trueop0) == CONST_DOUBLE
936 && GET_CODE (trueop1) == CONST_DOUBLE
937 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
939 REAL_VALUE_TYPE f0, f1, value;
941 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
942 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
943 f0 = real_value_truncate (mode, f0);
944 f1 = real_value_truncate (mode, f1);
946 if (code == DIV
947 && !MODE_HAS_INFINITIES (mode)
948 && REAL_VALUES_EQUAL (f1, dconst0))
949 return 0;
951 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
953 value = real_value_truncate (mode, value);
954 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
957 /* We can fold some multi-word operations. */
958 if (GET_MODE_CLASS (mode) == MODE_INT
959 && width == HOST_BITS_PER_WIDE_INT * 2
960 && (GET_CODE (trueop0) == CONST_DOUBLE
961 || GET_CODE (trueop0) == CONST_INT)
962 && (GET_CODE (trueop1) == CONST_DOUBLE
963 || GET_CODE (trueop1) == CONST_INT))
965 unsigned HOST_WIDE_INT l1, l2, lv;
966 HOST_WIDE_INT h1, h2, hv;
968 if (GET_CODE (trueop0) == CONST_DOUBLE)
969 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
970 else
971 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
973 if (GET_CODE (trueop1) == CONST_DOUBLE)
974 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
975 else
976 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
978 switch (code)
980 case MINUS:
981 /* A - B == A + (-B). */
982 neg_double (l2, h2, &lv, &hv);
983 l2 = lv, h2 = hv;
985 /* ... fall through ... */
987 case PLUS:
988 add_double (l1, h1, l2, h2, &lv, &hv);
989 break;
991 case MULT:
992 mul_double (l1, h1, l2, h2, &lv, &hv);
993 break;
995 case DIV: case MOD: case UDIV: case UMOD:
996 /* We'd need to include tree.h to do this and it doesn't seem worth
997 it. */
998 return 0;
1000 case AND:
1001 lv = l1 & l2, hv = h1 & h2;
1002 break;
1004 case IOR:
1005 lv = l1 | l2, hv = h1 | h2;
1006 break;
1008 case XOR:
1009 lv = l1 ^ l2, hv = h1 ^ h2;
1010 break;
1012 case SMIN:
1013 if (h1 < h2
1014 || (h1 == h2
1015 && ((unsigned HOST_WIDE_INT) l1
1016 < (unsigned HOST_WIDE_INT) l2)))
1017 lv = l1, hv = h1;
1018 else
1019 lv = l2, hv = h2;
1020 break;
1022 case SMAX:
1023 if (h1 > h2
1024 || (h1 == h2
1025 && ((unsigned HOST_WIDE_INT) l1
1026 > (unsigned HOST_WIDE_INT) l2)))
1027 lv = l1, hv = h1;
1028 else
1029 lv = l2, hv = h2;
1030 break;
1032 case UMIN:
1033 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1034 || (h1 == h2
1035 && ((unsigned HOST_WIDE_INT) l1
1036 < (unsigned HOST_WIDE_INT) l2)))
1037 lv = l1, hv = h1;
1038 else
1039 lv = l2, hv = h2;
1040 break;
1042 case UMAX:
1043 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1044 || (h1 == h2
1045 && ((unsigned HOST_WIDE_INT) l1
1046 > (unsigned HOST_WIDE_INT) l2)))
1047 lv = l1, hv = h1;
1048 else
1049 lv = l2, hv = h2;
1050 break;
1052 case LSHIFTRT: case ASHIFTRT:
1053 case ASHIFT:
1054 case ROTATE: case ROTATERT:
1055 #ifdef SHIFT_COUNT_TRUNCATED
1056 if (SHIFT_COUNT_TRUNCATED)
1057 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1058 #endif
1060 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1061 return 0;
1063 if (code == LSHIFTRT || code == ASHIFTRT)
1064 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1065 code == ASHIFTRT);
1066 else if (code == ASHIFT)
1067 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1068 else if (code == ROTATE)
1069 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1070 else /* code == ROTATERT */
1071 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1072 break;
1074 default:
1075 return 0;
1078 return immed_double_const (lv, hv, mode);
1081 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1082 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1084 /* Even if we can't compute a constant result,
1085 there are some cases worth simplifying. */
1087 switch (code)
1089 case PLUS:
1090 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1091 when x is NaN, infinite, or finite and nonzero. They aren't
1092 when x is -0 and the rounding mode is not towards -infinity,
1093 since (-0) + 0 is then 0. */
1094 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1095 return op0;
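/* Worked example of the excluded case: under the default
round-to-nearest rounding mode, (-0.0) + 0.0 evaluates to +0.0,
so folding x + 0 to x would wrongly preserve the sign of a
negative zero; HONOR_SIGNED_ZEROS guards against exactly that. */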
1097 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1098 transformations are safe even for IEEE. */
1099 if (GET_CODE (op0) == NEG)
1100 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1101 else if (GET_CODE (op1) == NEG)
1102 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1104 /* (~a) + 1 -> -a */
1105 if (INTEGRAL_MODE_P (mode)
1106 && GET_CODE (op0) == NOT
1107 && trueop1 == const1_rtx)
1108 return gen_rtx_NEG (mode, XEXP (op0, 0));
1110 /* Handle both-operands-constant cases. We can only add
1111 CONST_INTs to constants since the sum of relocatable symbols
1112 can't be handled by most assemblers. Don't add CONST_INT
1113 to CONST_INT since overflow won't be computed properly if wider
1114 than HOST_BITS_PER_WIDE_INT. */
1116 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1117 && GET_CODE (op1) == CONST_INT)
1118 return plus_constant (op0, INTVAL (op1));
1119 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1120 && GET_CODE (op0) == CONST_INT)
1121 return plus_constant (op1, INTVAL (op0));
1123 /* See if this is something like X * C - X or vice versa or
1124 if the multiplication is written as a shift. If so, we can
1125 distribute and make a new multiply, shift, or maybe just
1126 have X (if C is 2 in the example above). But don't make
1127 real multiply if we didn't have one before. */
1129 if (! FLOAT_MODE_P (mode))
1131 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1132 rtx lhs = op0, rhs = op1;
1133 int had_mult = 0;
1135 if (GET_CODE (lhs) == NEG)
1136 coeff0 = -1, lhs = XEXP (lhs, 0);
1137 else if (GET_CODE (lhs) == MULT
1138 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1140 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1141 had_mult = 1;
1143 else if (GET_CODE (lhs) == ASHIFT
1144 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1145 && INTVAL (XEXP (lhs, 1)) >= 0
1146 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1148 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1149 lhs = XEXP (lhs, 0);
1152 if (GET_CODE (rhs) == NEG)
1153 coeff1 = -1, rhs = XEXP (rhs, 0);
1154 else if (GET_CODE (rhs) == MULT
1155 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1157 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1158 had_mult = 1;
1160 else if (GET_CODE (rhs) == ASHIFT
1161 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1162 && INTVAL (XEXP (rhs, 1)) >= 0
1163 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1165 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1166 rhs = XEXP (rhs, 0);
1169 if (rtx_equal_p (lhs, rhs))
1171 tem = simplify_gen_binary (MULT, mode, lhs,
1172 GEN_INT (coeff0 + coeff1));
1173 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
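/* Worked example: for (plus (mult x 3) x), lhs becomes x with
coeff0 == 3 and rhs is x with coeff1 == 1, so the code above
rebuilds x * 4. The had_mult test then keeps e.g.
(plus (ashift x 2) x) from being rewritten into a multiply that
was not present in the original rtl. */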
1177 /* If one of the operands is a PLUS or a MINUS, see if we can
1178 simplify this by the associative law.
1179 Don't use the associative law for floating point.
1180 The inaccuracy makes it nonassociative,
1181 and subtle programs can break if operations are associated. */
1183 if (INTEGRAL_MODE_P (mode)
1184 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1185 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1186 || (GET_CODE (op0) == CONST
1187 && GET_CODE (XEXP (op0, 0)) == PLUS)
1188 || (GET_CODE (op1) == CONST
1189 && GET_CODE (XEXP (op1, 0)) == PLUS))
1190 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1191 return tem;
1192 break;
1194 case COMPARE:
1195 #ifdef HAVE_cc0
1196 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1197 using cc0, in which case we want to leave it as a COMPARE
1198 so we can distinguish it from a register-register-copy.
1200 In IEEE floating point, x-0 is not the same as x. */
1202 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1203 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1204 && trueop1 == CONST0_RTX (mode))
1205 return op0;
1206 #endif
1208 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1209 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1210 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1211 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1213 rtx xop00 = XEXP (op0, 0);
1214 rtx xop10 = XEXP (op1, 0);
1216 #ifdef HAVE_cc0
1217 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1218 #else
1219 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1220 && GET_MODE (xop00) == GET_MODE (xop10)
1221 && REGNO (xop00) == REGNO (xop10)
1222 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1223 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1224 #endif
1225 return xop00;
1227 break;
1229 case MINUS:
1230 /* We can't assume x-x is 0 even with non-IEEE floating point,
1231 but since it is zero except in very strange circumstances, we
1232 will treat it as zero with -funsafe-math-optimizations. */
1233 if (rtx_equal_p (trueop0, trueop1)
1234 && ! side_effects_p (op0)
1235 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1236 return CONST0_RTX (mode);
1238 /* Change subtraction from zero into negation. (0 - x) is the
1239 same as -x when x is NaN, infinite, or finite and nonzero.
1240 But if the mode has signed zeros, and does not round towards
1241 -infinity, then 0 - 0 is 0, not -0. */
1242 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1243 return gen_rtx_NEG (mode, op1);
1245 /* (-1 - a) is ~a. */
1246 if (trueop0 == constm1_rtx)
1247 return gen_rtx_NOT (mode, op1);
1249 /* Subtracting 0 has no effect unless the mode has signed zeros
1250 and supports rounding towards -infinity. In such a case,
1251 0 - 0 is -0. */
1252 if (!(HONOR_SIGNED_ZEROS (mode)
1253 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1254 && trueop1 == CONST0_RTX (mode))
1255 return op0;
1257 /* See if this is something like X * C - X or vice versa or
1258 if the multiplication is written as a shift. If so, we can
1259 distribute and make a new multiply, shift, or maybe just
1260 have X (if C is 2 in the example above). But don't make
1261 real multiply if we didn't have one before. */
1263 if (! FLOAT_MODE_P (mode))
1265 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1266 rtx lhs = op0, rhs = op1;
1267 int had_mult = 0;
1269 if (GET_CODE (lhs) == NEG)
1270 coeff0 = -1, lhs = XEXP (lhs, 0);
1271 else if (GET_CODE (lhs) == MULT
1272 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1274 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1275 had_mult = 1;
1277 else if (GET_CODE (lhs) == ASHIFT
1278 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1279 && INTVAL (XEXP (lhs, 1)) >= 0
1280 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1282 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1283 lhs = XEXP (lhs, 0);
1286 if (GET_CODE (rhs) == NEG)
1287 coeff1 = - 1, rhs = XEXP (rhs, 0);
1288 else if (GET_CODE (rhs) == MULT
1289 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1291 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1292 had_mult = 1;
1294 else if (GET_CODE (rhs) == ASHIFT
1295 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1296 && INTVAL (XEXP (rhs, 1)) >= 0
1297 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1299 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1300 rhs = XEXP (rhs, 0);
1303 if (rtx_equal_p (lhs, rhs))
1305 tem = simplify_gen_binary (MULT, mode, lhs,
1306 GEN_INT (coeff0 - coeff1));
1307 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1311 /* (a - (-b)) -> (a + b). True even for IEEE. */
1312 if (GET_CODE (op1) == NEG)
1313 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1315 /* If one of the operands is a PLUS or a MINUS, see if we can
1316 simplify this by the associative law.
1317 Don't use the associative law for floating point.
1318 The inaccuracy makes it nonassociative,
1319 and subtle programs can break if operations are associated. */
1321 if (INTEGRAL_MODE_P (mode)
1322 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1323 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1324 || (GET_CODE (op0) == CONST
1325 && GET_CODE (XEXP (op0, 0)) == PLUS)
1326 || (GET_CODE (op1) == CONST
1327 && GET_CODE (XEXP (op1, 0)) == PLUS))
1328 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1329 return tem;
1331 /* Don't let a relocatable value get a negative coeff. */
1332 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1333 return simplify_gen_binary (PLUS, mode,
1334 op0,
1335 neg_const_int (mode, op1));
1337 /* (x - (x & y)) -> (x & ~y) */
1338 if (GET_CODE (op1) == AND)
1340 if (rtx_equal_p (op0, XEXP (op1, 0)))
1341 return simplify_gen_binary (AND, mode, op0,
1342 gen_rtx_NOT (mode, XEXP (op1, 1)));
1343 if (rtx_equal_p (op0, XEXP (op1, 1)))
1344 return simplify_gen_binary (AND, mode, op0,
1345 gen_rtx_NOT (mode, XEXP (op1, 0)));
1347 break;
1349 case MULT:
1350 if (trueop1 == constm1_rtx)
1352 tem = simplify_unary_operation (NEG, mode, op0, mode);
1354 return tem ? tem : gen_rtx_NEG (mode, op0);
1357 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1358 x is NaN, since x * 0 is then also NaN. Nor is it valid
1359 when the mode has signed zeros, since multiplying a negative
1360 number by 0 will give -0, not 0. */
1361 if (!HONOR_NANS (mode)
1362 && !HONOR_SIGNED_ZEROS (mode)
1363 && trueop1 == CONST0_RTX (mode)
1364 && ! side_effects_p (op0))
1365 return op1;
1367 /* In IEEE floating point, x*1 is not equivalent to x for
1368 signalling NaNs. */
1369 if (!HONOR_SNANS (mode)
1370 && trueop1 == CONST1_RTX (mode))
1371 return op0;
1373 /* Convert multiply by constant power of two into shift unless
1374 we are still generating RTL. This test is a kludge. */
1375 if (GET_CODE (trueop1) == CONST_INT
1376 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1377 /* If the mode is larger than the host word size, and the
1378 uppermost bit is set, then this isn't a power of two due
1379 to implicit sign extension. */
1380 && (width <= HOST_BITS_PER_WIDE_INT
1381 || val != HOST_BITS_PER_WIDE_INT - 1)
1382 && ! rtx_equal_function_value_matters)
1383 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1385 /* x*2 is x+x and x*(-1) is -x */
1386 if (GET_CODE (trueop1) == CONST_DOUBLE
1387 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1388 && GET_MODE (op0) == mode)
1390 REAL_VALUE_TYPE d;
1391 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1393 if (REAL_VALUES_EQUAL (d, dconst2))
1394 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1396 if (REAL_VALUES_EQUAL (d, dconstm1))
1397 return gen_rtx_NEG (mode, op0);
1399 break;
1401 case IOR:
1402 if (trueop1 == const0_rtx)
1403 return op0;
1404 if (GET_CODE (trueop1) == CONST_INT
1405 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1406 == GET_MODE_MASK (mode)))
1407 return op1;
1408 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1409 return op0;
1410 /* A | (~A) -> -1 */
1411 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1412 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1413 && ! side_effects_p (op0)
1414 && GET_MODE_CLASS (mode) != MODE_CC)
1415 return constm1_rtx;
1416 break;
1418 case XOR:
1419 if (trueop1 == const0_rtx)
1420 return op0;
1421 if (GET_CODE (trueop1) == CONST_INT
1422 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1423 == GET_MODE_MASK (mode)))
1424 return gen_rtx_NOT (mode, op0);
1425 if (trueop0 == trueop1 && ! side_effects_p (op0)
1426 && GET_MODE_CLASS (mode) != MODE_CC)
1427 return const0_rtx;
1428 break;
1430 case AND:
1431 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1432 return const0_rtx;
1433 if (GET_CODE (trueop1) == CONST_INT
1434 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1435 == GET_MODE_MASK (mode)))
1436 return op0;
1437 if (trueop0 == trueop1 && ! side_effects_p (op0)
1438 && GET_MODE_CLASS (mode) != MODE_CC)
1439 return op0;
1440 /* A & (~A) -> 0 */
1441 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1442 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1443 && ! side_effects_p (op0)
1444 && GET_MODE_CLASS (mode) != MODE_CC)
1445 return const0_rtx;
1446 break;
1448 case UDIV:
1449 /* Convert divide by power of two into shift (divide by 1 handled
1450 below). */
1451 if (GET_CODE (trueop1) == CONST_INT
1452 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1453 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
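/* Worked example: for an unsigned divide by 8, exact_log2 (8) == 3,
so (udiv x 8) becomes (lshiftrt x 3). The > 0 test excludes
division by 1, which falls through to the DIV handling below. */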
1455 /* ... fall through ... */
1457 case DIV:
1458 if (trueop1 == CONST1_RTX (mode))
1460 /* On some platforms DIV uses narrower mode than its
1461 operands. */
1462 rtx x = gen_lowpart_common (mode, op0);
1463 if (x)
1464 return x;
1465 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1466 return gen_lowpart_SUBREG (mode, op0);
1467 else
1468 return op0;
1471 /* Maybe change 0 / x to 0. This transformation isn't safe for
1472 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1473 Nor is it safe for modes with signed zeros, since dividing
1474 0 by a negative number gives -0, not 0. */
1475 if (!HONOR_NANS (mode)
1476 && !HONOR_SIGNED_ZEROS (mode)
1477 && trueop0 == CONST0_RTX (mode)
1478 && ! side_effects_p (op1))
1479 return op0;
1481 /* Change division by a constant into multiplication. Only do
1482 this with -funsafe-math-optimizations. */
1483 else if (GET_CODE (trueop1) == CONST_DOUBLE
1484 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1485 && trueop1 != CONST0_RTX (mode)
1486 && flag_unsafe_math_optimizations)
1488 REAL_VALUE_TYPE d;
1489 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1491 if (! REAL_VALUES_EQUAL (d, dconst0))
1493 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1494 return gen_rtx_MULT (mode, op0,
1495 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1498 break;
1500 case UMOD:
1501 /* Handle modulus by power of two (mod with 1 handled below). */
1502 if (GET_CODE (trueop1) == CONST_INT
1503 && exact_log2 (INTVAL (trueop1)) > 0)
1504 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
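/* Worked example: a power-of-two modulus is a mask of the low bits,
so (umod x 8) becomes (and x 7). Modulus by 1 falls through to
the shared MOD code below, which folds it to 0. */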
1506 /* ... fall through ... */
1508 case MOD:
1509 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1510 && ! side_effects_p (op0) && ! side_effects_p (op1))
1511 return const0_rtx;
1512 break;
1514 case ROTATERT:
1515 case ROTATE:
1516 case ASHIFTRT:
1517 /* Rotating ~0 always results in ~0. */
1518 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1519 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1520 && ! side_effects_p (op1))
1521 return op0;
1523 /* ... fall through ... */
1525 case ASHIFT:
1526 case LSHIFTRT:
1527 if (trueop1 == const0_rtx)
1528 return op0;
1529 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1530 return op0;
1531 break;
1533 case SMIN:
1534 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1535 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1536 && ! side_effects_p (op0))
1537 return op1;
1538 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1539 return op0;
1540 break;
1542 case SMAX:
1543 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1544 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1545 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1546 && ! side_effects_p (op0))
1547 return op1;
1548 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1549 return op0;
1550 break;
1552 case UMIN:
1553 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1554 return op1;
1555 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1556 return op0;
1557 break;
1559 case UMAX:
1560 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1561 return op1;
1562 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1563 return op0;
1564 break;
1566 case SS_PLUS:
1567 case US_PLUS:
1568 case SS_MINUS:
1569 case US_MINUS:
1570 /* ??? There are simplifications that can be done. */
1571 return 0;
1573 case VEC_SELECT:
1574 if (!VECTOR_MODE_P (mode))
1576 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1577 || (mode
1578 != GET_MODE_INNER (GET_MODE (trueop0)))
1579 || GET_CODE (trueop1) != PARALLEL
1580 || XVECLEN (trueop1, 0) != 1
1581 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1582 abort ();
1584 if (GET_CODE (trueop0) == CONST_VECTOR)
1585 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1587 else
1589 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1590 || (GET_MODE_INNER (mode)
1591 != GET_MODE_INNER (GET_MODE (trueop0)))
1592 || GET_CODE (trueop1) != PARALLEL)
1593 abort ();
1595 if (GET_CODE (trueop0) == CONST_VECTOR)
1597 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1598 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1599 rtvec v = rtvec_alloc (n_elts);
1600 unsigned int i;
1602 if (XVECLEN (trueop1, 0) != (int)n_elts)
1603 abort ();
1604 for (i = 0; i < n_elts; i++)
1606 rtx x = XVECEXP (trueop1, 0, i);
1608 if (GET_CODE (x) != CONST_INT)
1609 abort ();
1610 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1613 return gen_rtx_CONST_VECTOR (mode, v);
1616 return 0;
1617 case VEC_CONCAT:
1619 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1620 ? GET_MODE (trueop0)
1621 : GET_MODE_INNER (mode));
1622 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1623 ? GET_MODE (trueop1)
1624 : GET_MODE_INNER (mode));
1626 if (!VECTOR_MODE_P (mode)
1627 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1628 != GET_MODE_SIZE (mode)))
1629 abort ();
1631 if ((VECTOR_MODE_P (op0_mode)
1632 && (GET_MODE_INNER (mode)
1633 != GET_MODE_INNER (op0_mode)))
1634 || (!VECTOR_MODE_P (op0_mode)
1635 && GET_MODE_INNER (mode) != op0_mode))
1636 abort ();
1638 if ((VECTOR_MODE_P (op1_mode)
1639 && (GET_MODE_INNER (mode)
1640 != GET_MODE_INNER (op1_mode)))
1641 || (!VECTOR_MODE_P (op1_mode)
1642 && GET_MODE_INNER (mode) != op1_mode))
1643 abort ();
1645 if ((GET_CODE (trueop0) == CONST_VECTOR
1646 || GET_CODE (trueop0) == CONST_INT
1647 || GET_CODE (trueop0) == CONST_DOUBLE)
1648 && (GET_CODE (trueop1) == CONST_VECTOR
1649 || GET_CODE (trueop1) == CONST_INT
1650 || GET_CODE (trueop1) == CONST_DOUBLE))
1652 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1653 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1654 rtvec v = rtvec_alloc (n_elts);
1655 unsigned int i;
1656 unsigned in_n_elts = 1;
1658 if (VECTOR_MODE_P (op0_mode))
1659 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1660 for (i = 0; i < n_elts; i++)
1662 if (i < in_n_elts)
1664 if (!VECTOR_MODE_P (op0_mode))
1665 RTVEC_ELT (v, i) = trueop0;
1666 else
1667 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1669 else
1671 if (!VECTOR_MODE_P (op1_mode))
1672 RTVEC_ELT (v, i) = trueop1;
1673 else
1674 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1675 i - in_n_elts);
1679 return gen_rtx_CONST_VECTOR (mode, v);
1682 return 0;
1684 default:
1685 abort ();
1688 return 0;
1691 /* Get the integer argument values in two forms:
1692 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1694 arg0 = INTVAL (trueop0);
1695 arg1 = INTVAL (trueop1);
1697 if (width < HOST_BITS_PER_WIDE_INT)
1699 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1700 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1702 arg0s = arg0;
1703 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1704 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1706 arg1s = arg1;
1707 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1708 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1710 else
1712 arg0s = arg0;
1713 arg1s = arg1;
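/* Worked example: for QImode (width == 8) and a stored value of
0xff, the masking above gives arg0 == 0xff (zero-extended) while
the sign step gives arg0s == -1, so the unsigned operators below
use ARG0/ARG1 and the signed ones use ARG0S/ARG1S. */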
1716 /* Compute the value of the arithmetic. */
1718 switch (code)
1720 case PLUS:
1721 val = arg0s + arg1s;
1722 break;
1724 case MINUS:
1725 val = arg0s - arg1s;
1726 break;
1728 case MULT:
1729 val = arg0s * arg1s;
1730 break;
1732 case DIV:
1733 if (arg1s == 0
1734 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1735 && arg1s == -1))
1736 return 0;
1737 val = arg0s / arg1s;
1738 break;
1740 case MOD:
1741 if (arg1s == 0
1742 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1743 && arg1s == -1))
1744 return 0;
1745 val = arg0s % arg1s;
1746 break;
1748 case UDIV:
1749 if (arg1 == 0
1750 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1751 && arg1s == -1))
1752 return 0;
1753 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1754 break;
1756 case UMOD:
1757 if (arg1 == 0
1758 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1759 && arg1s == -1))
1760 return 0;
1761 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1762 break;
1764 case AND:
1765 val = arg0 & arg1;
1766 break;
1768 case IOR:
1769 val = arg0 | arg1;
1770 break;
1772 case XOR:
1773 val = arg0 ^ arg1;
1774 break;
1776 case LSHIFTRT:
1777 /* If shift count is undefined, don't fold it; let the machine do
1778 what it wants. But truncate it if the machine will do that. */
1779 if (arg1 < 0)
1780 return 0;
1782 #ifdef SHIFT_COUNT_TRUNCATED
1783 if (SHIFT_COUNT_TRUNCATED)
1784 arg1 %= width;
1785 #endif
1787 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1788 break;
1790 case ASHIFT:
1791 if (arg1 < 0)
1792 return 0;
1794 #ifdef SHIFT_COUNT_TRUNCATED
1795 if (SHIFT_COUNT_TRUNCATED)
1796 arg1 %= width;
1797 #endif
1799 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1800 break;
1802 case ASHIFTRT:
1803 if (arg1 < 0)
1804 return 0;
1806 #ifdef SHIFT_COUNT_TRUNCATED
1807 if (SHIFT_COUNT_TRUNCATED)
1808 arg1 %= width;
1809 #endif
1811 val = arg0s >> arg1;
1813 /* Bootstrap compiler may not have sign extended the right shift.
1814 Manually extend the sign to ensure bootstrap cc matches gcc. */
1815 if (arg0s < 0 && arg1 > 0)
1816 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1818 break;
1820 case ROTATERT:
1821 if (arg1 < 0)
1822 return 0;
1824 arg1 %= width;
1825 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1826 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1827 break;
1829 case ROTATE:
1830 if (arg1 < 0)
1831 return 0;
1833 arg1 %= width;
1834 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1835 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1836 break;
1838 case COMPARE:
1839 /* Do nothing here. */
1840 return 0;
1842 case SMIN:
1843 val = arg0s <= arg1s ? arg0s : arg1s;
1844 break;
1846 case UMIN:
1847 val = ((unsigned HOST_WIDE_INT) arg0
1848 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1849 break;
1851 case SMAX:
1852 val = arg0s > arg1s ? arg0s : arg1s;
1853 break;
1855 case UMAX:
1856 val = ((unsigned HOST_WIDE_INT) arg0
1857 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1858 break;
1860 default:
1861 abort ();
1864 val = trunc_int_for_mode (val, mode);
1866 return GEN_INT (val);
1869 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1870 PLUS or MINUS.
1872 Rather than test for specific cases, we do this by a brute-force method
1873 and do all possible simplifications until no more changes occur. Then
1874 we rebuild the operation.
1876 If FORCE is true, then always generate the rtx. This is used to
1877 canonicalize stuff emitted from simplify_gen_binary. Note that this
1878 can still fail if the rtx is too complex. It won't fail just because
1879 the result is not 'simpler' than the input, however. */
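/* Illustrative sketch of the method: given
(minus (plus a b) (plus a c)), the expansion loop below flattens
the operands into the ops[] array as

{ a, + } { a, - } { b, + } { c, - }

after which the pairwise simplification pass cancels the two
copies of A, leaving the equivalent of (minus b c). */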
1881 struct simplify_plus_minus_op_data
1883 rtx op;
1884 int neg;
1887 static int
1888 simplify_plus_minus_op_data_cmp (p1, p2)
1889 const void *p1;
1890 const void *p2;
1892 const struct simplify_plus_minus_op_data *d1 = p1;
1893 const struct simplify_plus_minus_op_data *d2 = p2;
1895 return (commutative_operand_precedence (d2->op)
1896 - commutative_operand_precedence (d1->op));
1899 static rtx
1900 simplify_plus_minus (code, mode, op0, op1, force)
1901 enum rtx_code code;
1902 enum machine_mode mode;
1903 rtx op0, op1;
1904 int force;
1906 struct simplify_plus_minus_op_data ops[8];
1907 rtx result, tem;
1908 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1909 int first, negate, changed;
1910 int i, j;
1912 memset ((char *) ops, 0, sizeof ops);
1914 /* Set up the two operands and then expand them until nothing has been
1915 changed. If we run out of room in our array, give up; this should
1916 almost never happen. */
1918 ops[0].op = op0;
1919 ops[0].neg = 0;
1920 ops[1].op = op1;
1921 ops[1].neg = (code == MINUS);
1925 changed = 0;
1927 for (i = 0; i < n_ops; i++)
1929 rtx this_op = ops[i].op;
1930 int this_neg = ops[i].neg;
1931 enum rtx_code this_code = GET_CODE (this_op);
1933 switch (this_code)
1935 case PLUS:
1936 case MINUS:
1937 if (n_ops == 7)
1938 return NULL_RTX;
1940 ops[n_ops].op = XEXP (this_op, 1);
1941 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1942 n_ops++;
1944 ops[i].op = XEXP (this_op, 0);
1945 input_ops++;
1946 changed = 1;
1947 break;
1949 case NEG:
1950 ops[i].op = XEXP (this_op, 0);
1951 ops[i].neg = ! this_neg;
1952 changed = 1;
1953 break;
1955 case CONST:
1956 if (n_ops < 7
1957 && GET_CODE (XEXP (this_op, 0)) == PLUS
1958 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1959 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1961 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1962 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1963 ops[n_ops].neg = this_neg;
1964 n_ops++;
1965 input_consts++;
1966 changed = 1;
1968 break;
1970 case NOT:
1971 /* ~a -> (-a - 1) */
1972 if (n_ops != 7)
1974 ops[n_ops].op = constm1_rtx;
1975 ops[n_ops++].neg = this_neg;
1976 ops[i].op = XEXP (this_op, 0);
1977 ops[i].neg = !this_neg;
1978 changed = 1;
1980 break;
1982 case CONST_INT:
1983 if (this_neg)
1985 ops[i].op = neg_const_int (mode, this_op);
1986 ops[i].neg = 0;
1987 changed = 1;
1989 break;
1991 default:
1992 break;
1996 while (changed);
1998 /* If we only have two operands, we can't do anything. */
1999 if (n_ops <= 2 && !force)
2000 return NULL_RTX;
2002 /* Count the number of CONSTs we didn't split above. */
2003 for (i = 0; i < n_ops; i++)
2004 if (GET_CODE (ops[i].op) == CONST)
2005 input_consts++;
2007 /* Now simplify each pair of operands until nothing changes. The first
2008 time through just simplify constants against each other. */
2010 first = 1;
2013 changed = first;
2015 for (i = 0; i < n_ops - 1; i++)
2016 for (j = i + 1; j < n_ops; j++)
2018 rtx lhs = ops[i].op, rhs = ops[j].op;
2019 int lneg = ops[i].neg, rneg = ops[j].neg;
2021 if (lhs != 0 && rhs != 0
2022 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2024 enum rtx_code ncode = PLUS;
2026 if (lneg != rneg)
2028 ncode = MINUS;
2029 if (lneg)
2030 tem = lhs, lhs = rhs, rhs = tem;
2032 else if (swap_commutative_operands_p (lhs, rhs))
2033 tem = lhs, lhs = rhs, rhs = tem;
2035 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2037 /* Reject "simplifications" that just wrap the two
2038 arguments in a CONST. Failure to do so can result
2039 in infinite recursion with simplify_binary_operation
2040 when it calls us to simplify CONST operations. */
2041 if (tem
2042 && ! (GET_CODE (tem) == CONST
2043 && GET_CODE (XEXP (tem, 0)) == ncode
2044 && XEXP (XEXP (tem, 0), 0) == lhs
2045 && XEXP (XEXP (tem, 0), 1) == rhs)
2046 /* Don't allow -x + -1 -> ~x simplifications in the
2047 first pass. This allows us the chance to combine
2048 the -1 with other constants. */
2049 && ! (first
2050 && GET_CODE (tem) == NOT
2051 && XEXP (tem, 0) == rhs))
2053 lneg &= rneg;
2054 if (GET_CODE (tem) == NEG)
2055 tem = XEXP (tem, 0), lneg = !lneg;
2056 if (GET_CODE (tem) == CONST_INT && lneg)
2057 tem = neg_const_int (mode, tem), lneg = 0;
2059 ops[i].op = tem;
2060 ops[i].neg = lneg;
2061 ops[j].op = NULL_RTX;
2062 changed = 1;
2067 first = 0;
2069 while (changed);
2071 /* Pack all the operands to the lower-numbered entries. */
2072 for (i = 0, j = 0; j < n_ops; j++)
2073 if (ops[j].op)
2074 ops[i++] = ops[j];
2075 n_ops = i;
2077 /* Sort the operations based on swap_commutative_operands_p. */
2078 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2080 /* We suppressed creation of trivial CONST expressions in the
2081 combination loop to avoid recursion. Create one manually now.
2082 The combination loop should have ensured that there is exactly
2083 one CONST_INT, and the sort will have ensured that it is last
2084 in the array and that any other constant will be next-to-last. */
2086 if (n_ops > 1
2087 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2088 && CONSTANT_P (ops[n_ops - 2].op))
2090 rtx value = ops[n_ops - 1].op;
2091 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2092 value = neg_const_int (mode, value);
2093 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2094 n_ops--;
2097 /* Count the number of CONSTs that we generated. */
2098 n_consts = 0;
2099 for (i = 0; i < n_ops; i++)
2100 if (GET_CODE (ops[i].op) == CONST)
2101 n_consts++;
2103 /* Give up if we didn't reduce the number of operands we had. Make
2104 sure we count a CONST as two operands. If we have the same
2105 number of operands, but have made more CONSTs than before, this
2106 is also an improvement, so accept it. */
2107 if (!force
2108 && (n_ops + n_consts > input_ops
2109 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2110 return NULL_RTX;
2112 /* Put a non-negated operand first. If there aren't any, make all
2113 operands positive and negate the whole thing later. */
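/* Illustrative example: {-A, -B} becomes {A, B} with NEGATE set, so
   the code below produces (neg (plus A B)). */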
2115 negate = 0;
2116 for (i = 0; i < n_ops && ops[i].neg; i++)
2117 continue;
2118 if (i == n_ops)
2120 for (i = 0; i < n_ops; i++)
2121 ops[i].neg = 0;
2122 negate = 1;
2124 else if (i != 0)
2126 tem = ops[0].op;
2127 ops[0] = ops[i];
2128 ops[i].op = tem;
2129 ops[i].neg = 1;
2132 /* Now make the result by performing the requested operations. */
2133 result = ops[0].op;
2134 for (i = 1; i < n_ops; i++)
2135 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2136 mode, result, ops[i].op);
2138 return negate ? gen_rtx_NEG (mode, result) : result;
2141 /* Like simplify_binary_operation except used for relational operators.
2142 MODE is the mode of the operands, not that of the result. If MODE
2143 is VOIDmode, both operands must also be VOIDmode and we compare the
2144 operands in "infinite precision".
2146 If no simplification is possible, this function returns zero. Otherwise,
2147 it returns either const_true_rtx or const0_rtx. */
2149 rtx
2150 simplify_relational_operation (code, mode, op0, op1)
2151 enum rtx_code code;
2152 enum machine_mode mode;
2153 rtx op0, op1;
2155 int equal, op0lt, op0ltu, op1lt, op1ltu;
2156 rtx tem;
2157 rtx trueop0;
2158 rtx trueop1;
2160 if (mode == VOIDmode
2161 && (GET_MODE (op0) != VOIDmode
2162 || GET_MODE (op1) != VOIDmode))
2163 abort ();
2165 /* If op0 is a compare, extract the comparison arguments from it. */
2166 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2167 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2169 trueop0 = avoid_constant_pool_reference (op0);
2170 trueop1 = avoid_constant_pool_reference (op1);
2172 /* We can't simplify MODE_CC values since we don't know what the
2173 actual comparison is. */
2174 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
2175 #ifdef HAVE_cc0
2176 || op0 == cc0_rtx
2177 #endif
2179 return 0;
2181 /* Make sure the constant is second. */
2182 if (swap_commutative_operands_p (trueop0, trueop1))
2184 tem = op0, op0 = op1, op1 = tem;
2185 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2186 code = swap_condition (code);
2189 /* For integer comparisons of A and B, maybe we can simplify A - B and can
2190 then simplify a comparison of that with zero. If A and B are both either
2191 a register or a CONST_INT, this can't help; testing for these cases will
2192 prevent infinite recursion here and speed things up.
2194 If CODE is an unsigned comparison, then we can never do this optimization,
2195 because it gives an incorrect result if the subtraction wraps around zero.
2196 ANSI C defines unsigned operations such that they never overflow, and
2197 thus such cases cannot be ignored. */
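/* Illustrative example: for (gt (plus A (const_int 1)) A), the MINUS
   folds to (const_int 1) and the signed comparison of that with zero
   folds to const_true_rtx. */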
2199 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2200 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2201 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2202 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2203 && code != GTU && code != GEU && code != LTU && code != LEU)
2204 return simplify_relational_operation (signed_condition (code),
2205 mode, tem, const0_rtx);
2207 if (flag_unsafe_math_optimizations && code == ORDERED)
2208 return const_true_rtx;
2210 if (flag_unsafe_math_optimizations && code == UNORDERED)
2211 return const0_rtx;
2213 /* For modes without NaNs, if the two operands are equal, we know the
2214 result. */
2215 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
2216 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2218 /* If the operands are floating-point constants, see if we can fold
2219 the result. */
2220 else if (GET_CODE (trueop0) == CONST_DOUBLE
2221 && GET_CODE (trueop1) == CONST_DOUBLE
2222 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2224 REAL_VALUE_TYPE d0, d1;
2226 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2227 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2229 /* Comparisons are unordered iff at least one of the values is NaN. */
2230 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2231 switch (code)
2233 case UNEQ:
2234 case UNLT:
2235 case UNGT:
2236 case UNLE:
2237 case UNGE:
2238 case NE:
2239 case UNORDERED:
2240 return const_true_rtx;
2241 case EQ:
2242 case LT:
2243 case GT:
2244 case LE:
2245 case GE:
2246 case LTGT:
2247 case ORDERED:
2248 return const0_rtx;
2249 default:
2250 return 0;
2253 equal = REAL_VALUES_EQUAL (d0, d1);
2254 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2255 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2258 /* Otherwise, see if the operands are both integers. */
2259 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2260 && (GET_CODE (trueop0) == CONST_DOUBLE
2261 || GET_CODE (trueop0) == CONST_INT)
2262 && (GET_CODE (trueop1) == CONST_DOUBLE
2263 || GET_CODE (trueop1) == CONST_INT))
2265 int width = GET_MODE_BITSIZE (mode);
2266 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2267 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2269 /* Get the two words comprising each integer constant. */
2270 if (GET_CODE (trueop0) == CONST_DOUBLE)
2272 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2273 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2275 else
2277 l0u = l0s = INTVAL (trueop0);
2278 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2281 if (GET_CODE (trueop1) == CONST_DOUBLE)
2283 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2284 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2286 else
2288 l1u = l1s = INTVAL (trueop1);
2289 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2292 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2293 we have to sign or zero-extend the values. */
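/* Illustrative example: for a QImode value of 0xff on a 64-bit host,
   l0u is zero-extended to 255 while l0s is sign-extended to -1, so
   the unsigned and signed orderings are computed separately. */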
2294 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2296 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2297 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2299 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2300 l0s |= ((HOST_WIDE_INT) (-1) << width);
2302 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2303 l1s |= ((HOST_WIDE_INT) (-1) << width);
2305 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2306 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2308 equal = (h0u == h1u && l0u == l1u);
2309 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2310 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2311 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2312 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2315 /* Otherwise, there are some code-specific tests we can make. */
2316 else
2318 switch (code)
2320 case EQ:
2321 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2322 return const0_rtx;
2323 break;
2325 case NE:
2326 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2327 return const_true_rtx;
2328 break;
2330 case GEU:
2331 /* Unsigned values are never negative. */
2332 if (trueop1 == const0_rtx)
2333 return const_true_rtx;
2334 break;
2336 case LTU:
2337 if (trueop1 == const0_rtx)
2338 return const0_rtx;
2339 break;
2341 case LEU:
2342 /* Unsigned values are never greater than the largest
2343 unsigned value. */
2344 if (GET_CODE (trueop1) == CONST_INT
2345 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2346 && INTEGRAL_MODE_P (mode))
2347 return const_true_rtx;
2348 break;
2350 case GTU:
2351 if (GET_CODE (trueop1) == CONST_INT
2352 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2353 && INTEGRAL_MODE_P (mode))
2354 return const0_rtx;
2355 break;
2357 case LT:
2358 /* Optimize abs(x) < 0.0. */
2359 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2361 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2362 : trueop0;
2363 if (GET_CODE (tem) == ABS)
2364 return const0_rtx;
2366 break;
2368 case GE:
2369 /* Optimize abs(x) >= 0.0. */
2370 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2372 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2373 : trueop0;
2374 if (GET_CODE (tem) == ABS)
2375 return const1_rtx;
2377 break;
2379 default:
2380 break;
2383 return 0;
2386 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2387 as appropriate. */
2388 switch (code)
2390 case EQ:
2391 case UNEQ:
2392 return equal ? const_true_rtx : const0_rtx;
2393 case NE:
2394 case LTGT:
2395 return ! equal ? const_true_rtx : const0_rtx;
2396 case LT:
2397 case UNLT:
2398 return op0lt ? const_true_rtx : const0_rtx;
2399 case GT:
2400 case UNGT:
2401 return op1lt ? const_true_rtx : const0_rtx;
2402 case LTU:
2403 return op0ltu ? const_true_rtx : const0_rtx;
2404 case GTU:
2405 return op1ltu ? const_true_rtx : const0_rtx;
2406 case LE:
2407 case UNLE:
2408 return equal || op0lt ? const_true_rtx : const0_rtx;
2409 case GE:
2410 case UNGE:
2411 return equal || op1lt ? const_true_rtx : const0_rtx;
2412 case LEU:
2413 return equal || op0ltu ? const_true_rtx : const0_rtx;
2414 case GEU:
2415 return equal || op1ltu ? const_true_rtx : const0_rtx;
2416 case ORDERED:
2417 return const_true_rtx;
2418 case UNORDERED:
2419 return const0_rtx;
2420 default:
2421 abort ();
2425 /* Simplify CODE, an operation with result mode MODE and three operands,
2426 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2427 a constant. Return 0 if no simplification is possible. */
2429 rtx
2430 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2431 enum rtx_code code;
2432 enum machine_mode mode, op0_mode;
2433 rtx op0, op1, op2;
2435 unsigned int width = GET_MODE_BITSIZE (mode);
2437 /* VOIDmode means "infinite" precision. */
2438 if (width == 0)
2439 width = HOST_BITS_PER_WIDE_INT;
2441 switch (code)
2443 case SIGN_EXTRACT:
2444 case ZERO_EXTRACT:
2445 if (GET_CODE (op0) == CONST_INT
2446 && GET_CODE (op1) == CONST_INT
2447 && GET_CODE (op2) == CONST_INT
2448 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2449 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2451 /* Extracting a bit-field from a constant. */
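/* Illustrative example: with BITS_BIG_ENDIAN zero,
   (zero_extract (const_int 0xab) (const_int 4) (const_int 0))
   extracts the low nibble and folds to (const_int 0xb). */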
2452 HOST_WIDE_INT val = INTVAL (op0);
2454 if (BITS_BIG_ENDIAN)
2455 val >>= (GET_MODE_BITSIZE (op0_mode)
2456 - INTVAL (op2) - INTVAL (op1));
2457 else
2458 val >>= INTVAL (op2);
2460 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2462 /* First zero-extend. */
2463 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2464 /* If desired, propagate sign bit. */
2465 if (code == SIGN_EXTRACT
2466 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2467 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2470 /* Clear the bits that don't belong in our mode,
2471 unless they and our sign bit are all one.
2472 So we get either a reasonable negative value or a reasonable
2473 unsigned value for this mode. */
2474 if (width < HOST_BITS_PER_WIDE_INT
2475 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2476 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2477 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2479 return GEN_INT (val);
2481 break;
2483 case IF_THEN_ELSE:
2484 if (GET_CODE (op0) == CONST_INT)
2485 return op0 != const0_rtx ? op1 : op2;
2487 /* Convert a != b ? a : b and a == b ? b : a to "a". */
2488 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2489 && !HONOR_NANS (mode)
2490 && rtx_equal_p (XEXP (op0, 0), op1)
2491 && rtx_equal_p (XEXP (op0, 1), op2))
2492 return op1;
2493 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2494 && !HONOR_NANS (mode)
2495 && rtx_equal_p (XEXP (op0, 1), op1)
2496 && rtx_equal_p (XEXP (op0, 0), op2))
2497 return op2;
2498 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2500 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2501 ? GET_MODE (XEXP (op0, 1))
2502 : GET_MODE (XEXP (op0, 0)));
2503 rtx temp;
2504 if (cmp_mode == VOIDmode)
2505 cmp_mode = op0_mode;
2506 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2507 XEXP (op0, 0), XEXP (op0, 1));
2509 /* See if any simplifications were possible. */
2510 if (temp == const0_rtx)
2511 return op2;
2512 else if (temp == const1_rtx)
2513 return op1;
2514 else if (temp)
2515 op0 = temp;
2517 /* Look for happy constants in op1 and op2. */
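/* Illustrative example: on a target with STORE_FLAG_VALUE == 1,
   (if_then_else (ne X Y) (const_int 1) (const_int 0)) collapses to
   (ne X Y), and the swapped constants use the reversed comparison. */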
2518 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2520 HOST_WIDE_INT t = INTVAL (op1);
2521 HOST_WIDE_INT f = INTVAL (op2);
2523 if (t == STORE_FLAG_VALUE && f == 0)
2524 code = GET_CODE (op0);
2525 else if (t == 0 && f == STORE_FLAG_VALUE)
2527 enum rtx_code tmp;
2528 tmp = reversed_comparison_code (op0, NULL_RTX);
2529 if (tmp == UNKNOWN)
2530 break;
2531 code = tmp;
2533 else
2534 break;
2536 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2539 break;
2540 case VEC_MERGE:
2541 if (GET_MODE (op0) != mode
2542 || GET_MODE (op1) != mode
2543 || !VECTOR_MODE_P (mode))
2544 abort ();
2545 op2 = avoid_constant_pool_reference (op2);
2546 if (GET_CODE (op2) == CONST_INT)
2548 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2549 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2550 int mask = (1 << n_elts) - 1;
2552 if (!(INTVAL (op2) & mask))
2553 return op1;
2554 if ((INTVAL (op2) & mask) == mask)
2555 return op0;
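/* Illustrative example: with four elements,
   (vec_merge A B (const_int 15)) selects every element from A and
   simplifies to A, while a zero mask simplifies to B. */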
2557 op0 = avoid_constant_pool_reference (op0);
2558 op1 = avoid_constant_pool_reference (op1);
2559 if (GET_CODE (op0) == CONST_VECTOR
2560 && GET_CODE (op1) == CONST_VECTOR)
2562 rtvec v = rtvec_alloc (n_elts);
2563 unsigned int i;
2565 for (i = 0; i < n_elts; i++)
2566 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2567 ? CONST_VECTOR_ELT (op0, i)
2568 : CONST_VECTOR_ELT (op1, i));
2569 return gen_rtx_CONST_VECTOR (mode, v);
2572 break;
2574 default:
2575 abort ();
2578 return 0;
2581 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2582 Return 0 if no simplification is possible. */
2583 rtx
2584 simplify_subreg (outermode, op, innermode, byte)
2585 rtx op;
2586 unsigned int byte;
2587 enum machine_mode outermode, innermode;
2589 /* Little bit of sanity checking. */
2590 if (innermode == VOIDmode || outermode == VOIDmode
2591 || innermode == BLKmode || outermode == BLKmode)
2592 abort ();
2594 if (GET_MODE (op) != innermode
2595 && GET_MODE (op) != VOIDmode)
2596 abort ();
2598 if (byte % GET_MODE_SIZE (outermode)
2599 || byte >= GET_MODE_SIZE (innermode))
2600 abort ();
2602 if (outermode == innermode && !byte)
2603 return op;
2605 /* Simplify subregs of vector constants. */
2606 if (GET_CODE (op) == CONST_VECTOR)
2608 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2609 const unsigned int offset = byte / elt_size;
2610 rtx elt;
2612 if (GET_MODE_INNER (innermode) == outermode)
2614 elt = CONST_VECTOR_ELT (op, offset);
2616 /* ?? We probably don't need this copy_rtx because constants
2617 can be shared. ?? */
2619 return copy_rtx (elt);
2621 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2622 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2624 return (gen_rtx_CONST_VECTOR
2625 (outermode,
2626 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2627 &CONST_VECTOR_ELT (op, offset))));
2629 else if (GET_MODE_CLASS (outermode) == MODE_INT
2630 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2632 /* This happens when the target register size is smaller than
2633 the vector mode, and we synthesize operations with vectors
2634 of elements that are smaller than the register size. */
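/* Illustrative example: an SImode subreg of a V4QImode CONST_VECTOR
   packs the four QImode elements into one 32-bit integer, with
   BYTES_BIG_ENDIAN deciding the traversal order. */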
2635 HOST_WIDE_INT sum = 0, high = 0;
2636 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2637 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2638 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2639 int shift = BITS_PER_UNIT * elt_size;
2641 for (; n_elts--; i += step)
2643 elt = CONST_VECTOR_ELT (op, i);
2644 if (GET_CODE (elt) == CONST_DOUBLE
2645 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2647 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2648 elt);
2649 if (! elt)
2650 return NULL_RTX;
2652 if (GET_CODE (elt) != CONST_INT)
2653 return NULL_RTX;
2654 /* Avoid overflow. */
2655 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2656 return NULL_RTX;
2657 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2658 sum = (sum << shift) + INTVAL (elt);
2660 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2661 return GEN_INT (trunc_int_for_mode (sum, outermode));
2662 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2663 return immed_double_const (sum, high, outermode);
2664 else
2665 return NULL_RTX;
2667 else if (GET_MODE_CLASS (outermode) == MODE_INT
2668 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2670 enum machine_mode new_mode
2671 = int_mode_for_mode (GET_MODE_INNER (innermode));
2672 int subbyte = byte % elt_size;
2674 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2675 if (! op)
2676 return NULL_RTX;
2677 return simplify_subreg (outermode, op, new_mode, subbyte);
2679 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2680 /* This shouldn't happen, but let's not do anything stupid. */
2681 return NULL_RTX;
2684 /* Attempt to simplify constant to non-SUBREG expression. */
2685 if (CONSTANT_P (op))
2687 int offset, part;
2688 unsigned HOST_WIDE_INT val = 0;
2690 if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
2691 || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
2693 /* Construct a CONST_VECTOR from individual subregs. */
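/* Illustrative example: a V2SImode subreg of a DImode constant is
   built element by element, yielding a CONST_VECTOR holding the two
   SImode halves. */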
2694 enum machine_mode submode = GET_MODE_INNER (outermode);
2695 int subsize = GET_MODE_UNIT_SIZE (outermode);
2696 int i, elts = GET_MODE_NUNITS (outermode);
2697 rtvec v = rtvec_alloc (elts);
2698 rtx elt;
2700 for (i = 0; i < elts; i++, byte += subsize)
2702 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2703 /* ??? It would be nice if we could actually make such subregs
2704 on targets that allow such relocations. */
2705 if (byte >= GET_MODE_UNIT_SIZE (innermode))
2706 elt = CONST0_RTX (submode);
2707 else
2708 elt = simplify_subreg (submode, op, innermode, byte);
2709 if (! elt)
2710 return NULL_RTX;
2711 RTVEC_ELT (v, i) = elt;
2713 return gen_rtx_CONST_VECTOR (outermode, v);
2716 /* ??? This code is partly redundant with code below, but can handle
2717 the subregs of floats and similar corner cases.
2718 Later we should move all simplification code here and rewrite
2719 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2720 using SIMPLIFY_SUBREG. */
2721 if (subreg_lowpart_offset (outermode, innermode) == byte
2722 && GET_CODE (op) != CONST_VECTOR)
2724 rtx new = gen_lowpart_if_possible (outermode, op);
2725 if (new)
2726 return new;
2729 /* Similar comments to the above apply here. */
2730 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2731 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2732 && GET_MODE_CLASS (outermode) == MODE_INT)
2734 rtx new = constant_subword (op,
2735 (byte / UNITS_PER_WORD),
2736 innermode);
2737 if (new)
2738 return new;
2741 if (GET_MODE_CLASS (outermode) != MODE_INT
2742 && GET_MODE_CLASS (outermode) != MODE_CC)
2744 enum machine_mode new_mode = int_mode_for_mode (outermode);
2746 if (new_mode != innermode || byte != 0)
2748 op = simplify_subreg (new_mode, op, innermode, byte);
2749 if (! op)
2750 return NULL_RTX;
2751 return simplify_subreg (outermode, op, new_mode, 0);
2755 offset = byte * BITS_PER_UNIT;
2756 switch (GET_CODE (op))
2758 case CONST_DOUBLE:
2759 if (GET_MODE (op) != VOIDmode)
2760 break;
2762 /* We can't handle this case yet. */
2763 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2764 return NULL_RTX;
2766 part = offset >= HOST_BITS_PER_WIDE_INT;
2767 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2768 && BYTES_BIG_ENDIAN)
2769 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2770 && WORDS_BIG_ENDIAN))
2771 part = !part;
2772 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2773 offset %= HOST_BITS_PER_WIDE_INT;
2775 /* We've already picked the word we want from a double, so
2776 pretend this is actually an integer. */
2777 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2779 /* FALLTHROUGH */
2780 case CONST_INT:
2781 if (GET_CODE (op) == CONST_INT)
2782 val = INTVAL (op);
2784 /* We don't handle synthesizing of non-integral constants yet. */
2785 if (GET_MODE_CLASS (outermode) != MODE_INT)
2786 return NULL_RTX;
2788 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2790 if (WORDS_BIG_ENDIAN)
2791 offset = (GET_MODE_BITSIZE (innermode)
2792 - GET_MODE_BITSIZE (outermode) - offset);
2793 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2794 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2795 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2796 - 2 * (offset % BITS_PER_WORD));
2799 if (offset >= HOST_BITS_PER_WIDE_INT)
2800 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2801 else
2803 val >>= offset;
2804 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2805 val = trunc_int_for_mode (val, outermode);
2806 return GEN_INT (val);
2808 default:
2809 break;
2813 /* Changing mode twice with SUBREG => just change it once,
2814 or not at all if changing back to the op's starting mode. */
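/* Illustrative example: (subreg:QI (subreg:HI (reg:SI R) 0) 0) folds
   to the single (subreg:QI (reg:SI R) 0), and a subreg that restores
   the starting mode at offset 0 folds back to (reg:SI R) itself. */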
2815 if (GET_CODE (op) == SUBREG)
2817 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2818 int final_offset = byte + SUBREG_BYTE (op);
2819 rtx new;
2821 if (outermode == innermostmode
2822 && byte == 0 && SUBREG_BYTE (op) == 0)
2823 return SUBREG_REG (op);
2825 /* The SUBREG_BYTE represents the offset, as if the value were stored
2826 in memory. An irritating exception is the paradoxical subreg, where
2827 we define SUBREG_BYTE to be 0. On big-endian machines, this
2828 value should be negative. For a moment, undo this exception. */
2829 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2831 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2832 if (WORDS_BIG_ENDIAN)
2833 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2834 if (BYTES_BIG_ENDIAN)
2835 final_offset += difference % UNITS_PER_WORD;
2837 if (SUBREG_BYTE (op) == 0
2838 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2840 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2841 if (WORDS_BIG_ENDIAN)
2842 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2843 if (BYTES_BIG_ENDIAN)
2844 final_offset += difference % UNITS_PER_WORD;
2848 /* See whether the resulting subreg will be paradoxical. */
2848 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2850 /* In nonparadoxical subregs we can't handle negative offsets. */
2851 if (final_offset < 0)
2852 return NULL_RTX;
2853 /* Bail out if the resulting subreg would be incorrect. */
2854 if (final_offset % GET_MODE_SIZE (outermode)
2855 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2856 return NULL_RTX;
2858 else
2860 int offset = 0;
2861 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2863 /* In a paradoxical subreg, see if we are still looking at the lower part.
2864 If so, our SUBREG_BYTE will be 0. */
2865 if (WORDS_BIG_ENDIAN)
2866 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2867 if (BYTES_BIG_ENDIAN)
2868 offset += difference % UNITS_PER_WORD;
2869 if (offset == final_offset)
2870 final_offset = 0;
2871 else
2872 return NULL_RTX;
2875 /* Recurse for further possible simplifications. */
2876 new = simplify_subreg (outermode, SUBREG_REG (op),
2877 GET_MODE (SUBREG_REG (op)),
2878 final_offset);
2879 if (new)
2880 return new;
2881 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2884 /* SUBREG of a hard register => just change the register number
2885 and/or mode. If the hard register is not valid in that mode,
2886 suppress this simplification. If the hard register is the stack,
2887 frame, or argument pointer, leave this as a SUBREG. */
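/* Illustrative example: on a little-endian target where SImode is
   valid in hard register 1, (subreg:SI (reg:DI 1) 0) becomes
   (reg:SI 1) directly; the register number here is hypothetical. */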
2889 if (REG_P (op)
2890 && (! REG_FUNCTION_VALUE_P (op)
2891 || ! rtx_equal_function_value_matters)
2892 && REGNO (op) < FIRST_PSEUDO_REGISTER
2893 #ifdef CANNOT_CHANGE_MODE_CLASS
2894 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2895 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2896 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2897 #endif
2898 && ((reload_completed && !frame_pointer_needed)
2899 || (REGNO (op) != FRAME_POINTER_REGNUM
2900 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2901 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2902 #endif
2904 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2905 && REGNO (op) != ARG_POINTER_REGNUM
2906 #endif
2907 && REGNO (op) != STACK_POINTER_REGNUM)
2909 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2910 1);
2912 /* ??? We do allow it if the current REG is not valid for
2913 its mode. This is a kludge to work around how float/complex
2914 arguments are passed on 32-bit SPARC and should be fixed. */
2915 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2916 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2918 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2920 /* Propagate the original regno. We don't have any way to specify
2921 the offset inside the original regno, so do so only for the lowpart.
2922 The information is used only by alias analysis, which cannot
2923 grok partial registers anyway. */
2925 if (subreg_lowpart_offset (outermode, innermode) == byte)
2926 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2927 return x;
2931 /* If we have a SUBREG of a register that we are replacing and we are
2932 replacing it with a MEM, make a new MEM and try replacing the
2933 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2934 or if we would be widening it. */
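/* Illustrative example: (subreg:SI (mem:DI ADDR) 4) can be rewritten
   by adjust_address_nv as (mem:SI (plus ADDR (const_int 4))),
   assuming ADDR is not mode-dependent. */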
2936 if (GET_CODE (op) == MEM
2937 && ! mode_dependent_address_p (XEXP (op, 0))
2938 /* Allow splitting of volatile memory references in case we don't
2939 have an instruction to move the whole thing. */
2940 && (! MEM_VOLATILE_P (op)
2941 || ! have_insn_for (SET, innermode))
2942 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2943 return adjust_address_nv (op, outermode, byte);
2945 /* Handle complex values represented as CONCAT
2946 of real and imaginary part. */
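/* Illustrative example: for (concat:SC R I), a byte offset inside the
   first SFmode half selects the real part R, and any later offset
   selects the imaginary part I. */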
2947 if (GET_CODE (op) == CONCAT)
2949 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2950 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2951 unsigned int final_offset;
2952 rtx res;
2954 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2955 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2956 if (res)
2957 return res;
2958 /* We can at least simplify it by referring directly to the relevant part. */
2959 return gen_rtx_SUBREG (outermode, part, final_offset);
2962 return NULL_RTX;
2964 /* Make a SUBREG operation or equivalent if it folds. */
2966 rtx
2967 simplify_gen_subreg (outermode, op, innermode, byte)
2968 rtx op;
2969 unsigned int byte;
2970 enum machine_mode outermode, innermode;
2972 rtx new;
2973 /* Little bit of sanity checking. */
2974 if (innermode == VOIDmode || outermode == VOIDmode
2975 || innermode == BLKmode || outermode == BLKmode)
2976 abort ();
2978 if (GET_MODE (op) != innermode
2979 && GET_MODE (op) != VOIDmode)
2980 abort ();
2982 if (byte % GET_MODE_SIZE (outermode)
2983 || byte >= GET_MODE_SIZE (innermode))
2984 abort ();
2986 if (GET_CODE (op) == QUEUED)
2987 return NULL_RTX;
2989 new = simplify_subreg (outermode, op, innermode, byte);
2990 if (new)
2991 return new;
2993 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2994 return NULL_RTX;
2996 return gen_rtx_SUBREG (outermode, op, byte);
2998 /* Simplify X, an rtx expression.
3000 Return the simplified expression or NULL if no simplifications
3001 were possible.
3003 This is the preferred entry point into the simplification routines;
3004 however, we still allow passes to call the more specific routines.
3006 Right now GCC has three (yes, three) major bodies of RTL simplification
3007 code that need to be unified.
3009 1. fold_rtx in cse.c. This code uses various CSE specific
3010 information to aid in RTL simplification.
3012 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3013 it uses combine specific information to aid in RTL
3014 simplification.
3016 3. The routines in this file.
3019 Long term we want to only have one body of simplification code; to
3020 get to that state I recommend the following steps:
3022 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3023 which do not depend on pass-specific state into these routines.
3025 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3026 use this routine whenever possible.
3028 3. Allow for pass dependent state to be provided to these
3029 routines and add simplifications based on the pass dependent
3030 state. Remove code from cse.c & combine.c that becomes
3031 redundant/dead.
3033 It will take time, but ultimately the compiler will be easier to
3034 maintain and improve. It's totally silly that when we add a
3035 simplification it needs to be added to 4 places (3 for RTL
3036 simplification and 1 for tree simplification). */
3038 rtx
3039 simplify_rtx (x)
3040 rtx x;
3042 enum rtx_code code = GET_CODE (x);
3043 enum machine_mode mode = GET_MODE (x);
3045 switch (GET_RTX_CLASS (code))
3047 case '1':
3048 return simplify_unary_operation (code, mode,
3049 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3050 case 'c':
3051 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3053 rtx tem;
3055 tem = XEXP (x, 0);
3056 XEXP (x, 0) = XEXP (x, 1);
3057 XEXP (x, 1) = tem;
3058 return simplify_binary_operation (code, mode,
3059 XEXP (x, 0), XEXP (x, 1));
3062 case '2':
3063 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3065 case '3':
3066 case 'b':
3067 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3068 XEXP (x, 0), XEXP (x, 1),
3069 XEXP (x, 2));
3071 case '<':
3072 return simplify_relational_operation (code,
3073 ((GET_MODE (XEXP (x, 0))
3074 != VOIDmode)
3075 ? GET_MODE (XEXP (x, 0))
3076 : GET_MODE (XEXP (x, 1))),
3077 XEXP (x, 0), XEXP (x, 1));
3078 case 'x':
3079 if (code == SUBREG)
3080 return simplify_gen_subreg (mode, SUBREG_REG (x),
3081 GET_MODE (SUBREG_REG (x)),
3082 SUBREG_BYTE (x));
3083 if (code == CONSTANT_P_RTX)
3085 if (CONSTANT_P (XEXP (x,0)))
3086 return const1_rtx;
3088 return NULL;
3089 default:
3090 return NULL;