/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
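
/* A minimal sketch (editorial illustration, not part of the original
   source) of the (low, high) convention the macro above serves:
   widening one HOST_WIDE_INT into a double-width pair.  The helper
   name is hypothetical; it assumes a two's-complement HOST_WIDE_INT.  */
#if 0
static void
example_widen (HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT low = x;               /* low word, unsigned */
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);   /* -1 if x < 0, else 0 */
  /* (low, high) now encodes x as a double-width signed value, the
     same layout neg_double, add_double, etc. operate on.  */
}
#endif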

static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
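
/* Worked example (editorial, not original): in QImode the most
   negative value is -128, and -(-128) overflows to +128, which does
   not fit; gen_int_mode truncates 128 back into QImode, yielding -128
   again, matching the modular semantics of negation in a fixed-width
   mode.  */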

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
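
/* Usage sketch (editorial): a call such as
     simplify_gen_binary (PLUS, SImode, GEN_INT (2), x)
   relies on the swap above to put the constant second; if x is itself
   (plus y (const_int 3)), simplify_binary_operation and
   simplify_plus_minus should then fold the whole expression to
   (plus y (const_int 5)).  */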

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
	{
	  code = new;
	  mode = cmp_mode;
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	}
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
	rtx temp = simplify_gen_relational (code, mode,
					    (op_mode != VOIDmode
					     ? op_mode
					     : GET_MODE (op0) != VOIDmode
					       ? GET_MODE (op0)
					       : GET_MODE (op1)),
					    op0, op1);
#ifdef FLOAT_STORE_FLAG_VALUE
	if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	  {
	    if (temp == const0_rtx)
	      temp = CONST0_RTX (mode);
	    else if (temp == const_true_rtx)
	      temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
						   mode);
	  }
#endif
	return temp;
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    case 'o':
      if (code == MEM)
	return replace_equiv_address_nv (x,
					 simplify_replace_rtx (XEXP (x, 0),
							       old, new));
      else if (code == LO_SUM)
	{
	  rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}

      return x;

    default:
      return x;
    }
  return x;
}
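
/* Illustration (editorial): for an integral mode, replacing (reg A)
   with (const_int 0) in (plus (reg A) (reg B)) recurses into both
   operands and lets simplify_gen_binary fold the result down to
   (reg B).  */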

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
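
  /* Worked example (editorial): sign-extending the QImode value 0xff.
     The mask step above leaves val = 0xff; bit 7 (the QImode sign bit)
     is set, so val -= 0x100 produces -1, the correct sign-extended
     constant.  */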

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;

	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		 break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d); break;
	default:
	  abort ();
	}
      return gen_int_mode (i, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}

/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
	 || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
	return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
					 XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
	return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
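
/* Example (editorial): with code == PLUS, the first rule rewrites
   (plus (plus x (const_int 1)) (const_int 2)) as
   (plus x (const_int 3)); the last two rules merely float constants
   outward so later passes get a chance to combine them.  */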

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	return 0;

      if (code == DIV
	  && REAL_VALUES_EQUAL (f1, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;

	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;

	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;

	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

	  /* Fall through....  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));

	  /* Fall through....  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (mode
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL
		  || XVECLEN (trueop1, 0) != 1
		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
	    }
	  else
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (GET_MODE_INNER (mode)
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  if (XVECLEN (trueop1, 0) != (int) n_elts)
		    abort ();
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (GET_CODE (x) != CONST_INT)
			abort ();
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;
	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    if (!VECTOR_MODE_P (mode)
		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    != GET_MODE_SIZE (mode)))
	      abort ();

	    if ((VECTOR_MODE_P (op0_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op0_mode)))
		|| (!VECTOR_MODE_P (op0_mode)
		    && GET_MODE_INNER (mode) != op0_mode))
	      abort ();

	    if ((VECTOR_MODE_P (op1_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op1_mode)))
		|| (!VECTOR_MODE_P (op1_mode)
		    && GET_MODE_INNER (mode) != op1_mode))
	      abort ();

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }

	    return 0;
	  }

	default:
	  abort ();
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
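
/* Worked example (editorial): simplifying (x - (x + 3)) with
   code == MINUS.  The expansion loop below flattens the input into the
   ops array as { x, + }, { x, - }, { 3, - }; the negated constant is
   rewritten as { -3, + }, and the combination loop then cancels the
   two x entries, leaving (const_int -3).  */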

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);
2104 /* If we only have two operands, we can't do anything. */
2105 if (n_ops <= 2 && !force)
2106 return NULL_RTX;
2108 /* Count the number of CONSTs we didn't split above. */
2109 for (i = 0; i < n_ops; i++)
2110 if (GET_CODE (ops[i].op) == CONST)
2111 input_consts++;
2113 /* Now simplify each pair of operands until nothing changes. The first
2114 time through just simplify constants against each other. */
2116 first = 1;
2119 changed = first;
2121 for (i = 0; i < n_ops - 1; i++)
2122 for (j = i + 1; j < n_ops; j++)
2124 rtx lhs = ops[i].op, rhs = ops[j].op;
2125 int lneg = ops[i].neg, rneg = ops[j].neg;
2127 if (lhs != 0 && rhs != 0
2128 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2130 enum rtx_code ncode = PLUS;
2132 if (lneg != rneg)
2134 ncode = MINUS;
2135 if (lneg)
2136 tem = lhs, lhs = rhs, rhs = tem;
2138 else if (swap_commutative_operands_p (lhs, rhs))
2139 tem = lhs, lhs = rhs, rhs = tem;
2141 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2143 /* Reject "simplifications" that just wrap the two
2144 arguments in a CONST. Failure to do so can result
2145 in infinite recursion with simplify_binary_operation
2146 when it calls us to simplify CONST operations. */
2147 if (tem
2148 && ! (GET_CODE (tem) == CONST
2149 && GET_CODE (XEXP (tem, 0)) == ncode
2150 && XEXP (XEXP (tem, 0), 0) == lhs
2151 && XEXP (XEXP (tem, 0), 1) == rhs)
2152 /* Don't allow -x + -1 -> ~x simplifications in the
2153 first pass. This allows us the chance to combine
2154 the -1 with other constants. */
2155 && ! (first
2156 && GET_CODE (tem) == NOT
2157 && XEXP (tem, 0) == rhs))
2159 lneg &= rneg;
2160 if (GET_CODE (tem) == NEG)
2161 tem = XEXP (tem, 0), lneg = !lneg;
2162 if (GET_CODE (tem) == CONST_INT && lneg)
2163 tem = neg_const_int (mode, tem), lneg = 0;
2165 ops[i].op = tem;
2166 ops[i].neg = lneg;
2167 ops[j].op = NULL_RTX;
2168 changed = 1;
2173 first = 0;
2175 while (changed);
2177 /* Pack all the operands to the lower-numbered entries. */
2178 for (i = 0, j = 0; j < n_ops; j++)
2179 if (ops[j].op)
2180 ops[i++] = ops[j];
2181 n_ops = i;
2183 /* Sort the operations based on swap_commutative_operands_p. */
2184 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2186 /* We suppressed creation of trivial CONST expressions in the
2187 combination loop to avoid recursion. Create one manually now.
2188 The combination loop should have ensured that there is exactly
2189 one CONST_INT, and the sort will have ensured that it is last
2190 in the array and that any other constant will be next-to-last. */
2192 if (n_ops > 1
2193 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2194 && CONSTANT_P (ops[n_ops - 2].op))
2196 rtx value = ops[n_ops - 1].op;
2197 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2198 value = neg_const_int (mode, value);
2199 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2200 n_ops--;
2203 /* Count the number of CONSTs that we generated. */
2204 n_consts = 0;
2205 for (i = 0; i < n_ops; i++)
2206 if (GET_CODE (ops[i].op) == CONST)
2207 n_consts++;
2209 /* Give up if we didn't reduce the number of operands we had. Make
2210 sure we count a CONST as two operands. If we have the same
2211 number of operands, but have made more CONSTs than before, this
2212 is also an improvement, so accept it. */
2213 if (!force
2214 && (n_ops + n_consts > input_ops
2215 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2216 return NULL_RTX;
2218 /* Put a non-negated operand first. If there aren't any, make all
2219 operands positive and negate the whole thing later. */
2221 negate = 0;
2222 for (i = 0; i < n_ops && ops[i].neg; i++)
2223 continue;
2224 if (i == n_ops)
2226 for (i = 0; i < n_ops; i++)
2227 ops[i].neg = 0;
2228 negate = 1;
2230 else if (i != 0)
2232 tem = ops[0].op;
2233 ops[0] = ops[i];
2234 ops[i].op = tem;
2235 ops[i].neg = 1;
2238 /* Now make the result by performing the requested operations. */
2239 result = ops[0].op;
2240 for (i = 1; i < n_ops; i++)
2241 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2242 mode, result, ops[i].op);
2244 return negate ? gen_rtx_NEG (mode, result) : result;
2247 /* Like simplify_binary_operation except used for relational operators.
2248 MODE is the mode of the operands, not that of the result. If MODE
2249 is VOIDmode, both operands must also be VOIDmode and we compare the
2250 operands in "infinite precision".
2252 If no simplification is possible, this function returns zero. Otherwise,
2253 it returns either const_true_rtx or const0_rtx. */
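/* For instance, (ltu X (const_int 0)) folds to const0_rtx, since no
   unsigned value is less than zero; see the LTU case below.  */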
2256 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2257 rtx op0, rtx op1)
2259 int equal, op0lt, op0ltu, op1lt, op1ltu;
2260 rtx tem;
2261 rtx trueop0;
2262 rtx trueop1;
2264 if (mode == VOIDmode
2265 && (GET_MODE (op0) != VOIDmode
2266 || GET_MODE (op1) != VOIDmode))
2267 abort ();
2269 /* If op0 is a compare, extract the comparison arguments from it. */
2270 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2271 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2273 trueop0 = avoid_constant_pool_reference (op0);
2274 trueop1 = avoid_constant_pool_reference (op1);
2276 /* We can't simplify MODE_CC values since we don't know what the
2277 actual comparison is. */
2278 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2279 return 0;
2281 /* Make sure the constant is second. */
2282 if (swap_commutative_operands_p (trueop0, trueop1))
2284 tem = op0, op0 = op1, op1 = tem;
2285 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2286 code = swap_condition (code);
2289 /* For integer comparisons of A and B maybe we can simplify A - B and can
2290 then simplify a comparison of that with zero. If A and B are both either
2291 a register or a CONST_INT, this can't help; testing for these cases will
2292 prevent infinite recursion here and speed things up.
2294 If CODE is an unsigned comparison, then we can never do this optimization,
2295 because it gives an incorrect result if the subtraction wraps around zero.
2296 ANSI C defines unsigned operations such that they never overflow, and
2297 thus such cases cannot be ignored. */
2299 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2300 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2301 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2302 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2303 && code != GTU && code != GEU && code != LTU && code != LEU)
2304 return simplify_relational_operation (signed_condition (code),
2305 mode, tem, const0_rtx);
2307 if (flag_unsafe_math_optimizations && code == ORDERED)
2308 return const_true_rtx;
2310 if (flag_unsafe_math_optimizations && code == UNORDERED)
2311 return const0_rtx;
2313 /* For modes without NaNs, if the two operands are equal, we know the
2314 result, unless they have side effects. */
2315 if (! HONOR_NANS (GET_MODE (trueop0))
2316 && rtx_equal_p (trueop0, trueop1)
2317 && ! side_effects_p (trueop0))
2318 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2320 /* If the operands are floating-point constants, see if we can fold
2321 the result. */
2322 else if (GET_CODE (trueop0) == CONST_DOUBLE
2323 && GET_CODE (trueop1) == CONST_DOUBLE
2324 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2326 REAL_VALUE_TYPE d0, d1;
2328 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2329 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2331 /* Comparisons are unordered iff at least one of the values is NaN. */
2332 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2333 switch (code)
2335 case UNEQ:
2336 case UNLT:
2337 case UNGT:
2338 case UNLE:
2339 case UNGE:
2340 case NE:
2341 case UNORDERED:
2342 return const_true_rtx;
2343 case EQ:
2344 case LT:
2345 case GT:
2346 case LE:
2347 case GE:
2348 case LTGT:
2349 case ORDERED:
2350 return const0_rtx;
2351 default:
2352 return 0;
2355 equal = REAL_VALUES_EQUAL (d0, d1);
2356 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2357 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2360 /* Otherwise, see if the operands are both integers. */
2361 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2362 && (GET_CODE (trueop0) == CONST_DOUBLE
2363 || GET_CODE (trueop0) == CONST_INT)
2364 && (GET_CODE (trueop1) == CONST_DOUBLE
2365 || GET_CODE (trueop1) == CONST_INT))
2367 int width = GET_MODE_BITSIZE (mode);
2368 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2369 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2371 /* Get the two words comprising each integer constant. */
2372 if (GET_CODE (trueop0) == CONST_DOUBLE)
2374 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2375 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2377 else
2379 l0u = l0s = INTVAL (trueop0);
2380 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2383 if (GET_CODE (trueop1) == CONST_DOUBLE)
2385 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2386 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2388 else
2390 l1u = l1s = INTVAL (trueop1);
2391 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2394 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2395 we have to sign or zero-extend the values. */
2396 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2398 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2399 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2401 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2402 l0s |= ((HOST_WIDE_INT) (-1) << width);
2404 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2405 l1s |= ((HOST_WIDE_INT) (-1) << width);
2407 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2408 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
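/* Compare the (high, low) word pairs: the high words determine the
   signed and unsigned orderings respectively, and the low words are
   compared unsigned in both cases as a tie-breaker.  */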
2410 equal = (h0u == h1u && l0u == l1u);
2411 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2412 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2413 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2414 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2417 /* Otherwise, there are some code-specific tests we can make. */
2418 else
2420 switch (code)
2422 case EQ:
2423 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2424 return const0_rtx;
2425 break;
2427 case NE:
2428 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2429 return const_true_rtx;
2430 break;
2432 case GEU:
2433 /* Unsigned values are never negative. */
2434 if (trueop1 == const0_rtx)
2435 return const_true_rtx;
2436 break;
2438 case LTU:
2439 if (trueop1 == const0_rtx)
2440 return const0_rtx;
2441 break;
2443 case LEU:
2444 /* Unsigned values are never greater than the largest
2445 unsigned value. */
2446 if (GET_CODE (trueop1) == CONST_INT
2447 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2448 && INTEGRAL_MODE_P (mode))
2449 return const_true_rtx;
2450 break;
2452 case GTU:
2453 if (GET_CODE (trueop1) == CONST_INT
2454 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2455 && INTEGRAL_MODE_P (mode))
2456 return const0_rtx;
2457 break;
2459 case LT:
2460 /* Optimize abs(x) < 0.0. */
2461 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2463 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2464 : trueop0;
2465 if (GET_CODE (tem) == ABS)
2466 return const0_rtx;
2468 break;
2470 case GE:
2471 /* Optimize abs(x) >= 0.0. */
2472 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2474 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2475 : trueop0;
2476 if (GET_CODE (tem) == ABS)
2477 return const_true_rtx;
2479 break;
2481 case UNGE:
2482 /* Optimize ! (abs(x) < 0.0). */
2483 if (trueop1 == CONST0_RTX (mode))
2485 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2486 : trueop0;
2487 if (GET_CODE (tem) == ABS)
2488 return const_true_rtx;
2490 break;
2492 default:
2493 break;
2496 return 0;
2499 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2500 as appropriate. */
2501 switch (code)
2503 case EQ:
2504 case UNEQ:
2505 return equal ? const_true_rtx : const0_rtx;
2506 case NE:
2507 case LTGT:
2508 return ! equal ? const_true_rtx : const0_rtx;
2509 case LT:
2510 case UNLT:
2511 return op0lt ? const_true_rtx : const0_rtx;
2512 case GT:
2513 case UNGT:
2514 return op1lt ? const_true_rtx : const0_rtx;
2515 case LTU:
2516 return op0ltu ? const_true_rtx : const0_rtx;
2517 case GTU:
2518 return op1ltu ? const_true_rtx : const0_rtx;
2519 case LE:
2520 case UNLE:
2521 return equal || op0lt ? const_true_rtx : const0_rtx;
2522 case GE:
2523 case UNGE:
2524 return equal || op1lt ? const_true_rtx : const0_rtx;
2525 case LEU:
2526 return equal || op0ltu ? const_true_rtx : const0_rtx;
2527 case GEU:
2528 return equal || op1ltu ? const_true_rtx : const0_rtx;
2529 case ORDERED:
2530 return const_true_rtx;
2531 case UNORDERED:
2532 return const0_rtx;
2533 default:
2534 abort ();
2538 /* Simplify CODE, an operation with result mode MODE and three operands,
2539 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2540 a constant. Return 0 if no simplification is possible. */
2543 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2544 enum machine_mode op0_mode, rtx op0, rtx op1,
2545 rtx op2)
2547 unsigned int width = GET_MODE_BITSIZE (mode);
2549 /* VOIDmode means "infinite" precision. */
2550 if (width == 0)
2551 width = HOST_BITS_PER_WIDE_INT;
2553 switch (code)
2555 case SIGN_EXTRACT:
2556 case ZERO_EXTRACT:
2557 if (GET_CODE (op0) == CONST_INT
2558 && GET_CODE (op1) == CONST_INT
2559 && GET_CODE (op2) == CONST_INT
2560 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2561 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2563 /* Extracting a bit-field from a constant: e.g. with !BITS_BIG_ENDIAN, (zero_extract (const_int 0x1234) (const_int 4) (const_int 4)) is 3. */
2564 HOST_WIDE_INT val = INTVAL (op0);
2566 if (BITS_BIG_ENDIAN)
2567 val >>= (GET_MODE_BITSIZE (op0_mode)
2568 - INTVAL (op2) - INTVAL (op1));
2569 else
2570 val >>= INTVAL (op2);
2572 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2574 /* First zero-extend. */
2575 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2576 /* If desired, propagate sign bit. */
2577 if (code == SIGN_EXTRACT
2578 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2579 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2582 /* Clear the bits that don't belong in our mode,
2583 unless they and our sign bit are all one.
2584 So we get either a reasonable negative value or a reasonable
2585 unsigned value for this mode. */
2586 if (width < HOST_BITS_PER_WIDE_INT
2587 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2588 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2589 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2591 return GEN_INT (val);
2593 break;
2595 case IF_THEN_ELSE:
2596 if (GET_CODE (op0) == CONST_INT)
2597 return op0 != const0_rtx ? op1 : op2;
2599 /* Convert a != b ? a : b (or a == b ? b : a) to "a". */
2600 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2601 && !HONOR_NANS (mode)
2602 && rtx_equal_p (XEXP (op0, 0), op1)
2603 && rtx_equal_p (XEXP (op0, 1), op2))
2604 return op1;
2605 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2606 && !HONOR_NANS (mode)
2607 && rtx_equal_p (XEXP (op0, 1), op1)
2608 && rtx_equal_p (XEXP (op0, 0), op2))
2609 return op2;
2610 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2612 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2613 ? GET_MODE (XEXP (op0, 1))
2614 : GET_MODE (XEXP (op0, 0)));
2615 rtx temp;
2616 if (cmp_mode == VOIDmode)
2617 cmp_mode = op0_mode;
2618 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2619 XEXP (op0, 0), XEXP (op0, 1));
2621 /* See if any simplifications were possible. */
2622 if (temp == const0_rtx)
2623 return op2;
2624 else if (temp == const1_rtx)
2625 return op1;
2626 else if (temp)
2627 op0 = temp;
2629 /* Look for happy constants in op1 and op2: arms of STORE_FLAG_VALUE and zero let us collapse to the comparison itself or its reverse. */
2630 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2632 HOST_WIDE_INT t = INTVAL (op1);
2633 HOST_WIDE_INT f = INTVAL (op2);
2635 if (t == STORE_FLAG_VALUE && f == 0)
2636 code = GET_CODE (op0);
2637 else if (t == 0 && f == STORE_FLAG_VALUE)
2639 enum rtx_code tmp;
2640 tmp = reversed_comparison_code (op0, NULL_RTX);
2641 if (tmp == UNKNOWN)
2642 break;
2643 code = tmp;
2645 else
2646 break;
2648 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2651 break;
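/* In (vec_merge A B MASK) a set bit in the constant MASK selects the
   corresponding element from A, a clear bit the one from B.  */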
2652 case VEC_MERGE:
2653 if (GET_MODE (op0) != mode
2654 || GET_MODE (op1) != mode
2655 || !VECTOR_MODE_P (mode))
2656 abort ();
2657 op2 = avoid_constant_pool_reference (op2);
2658 if (GET_CODE (op2) == CONST_INT)
2660 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2661 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2662 int mask = (1 << n_elts) - 1;
2664 if (!(INTVAL (op2) & mask))
2665 return op1;
2666 if ((INTVAL (op2) & mask) == mask)
2667 return op0;
2669 op0 = avoid_constant_pool_reference (op0);
2670 op1 = avoid_constant_pool_reference (op1);
2671 if (GET_CODE (op0) == CONST_VECTOR
2672 && GET_CODE (op1) == CONST_VECTOR)
2674 rtvec v = rtvec_alloc (n_elts);
2675 unsigned int i;
2677 for (i = 0; i < n_elts; i++)
2678 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2679 ? CONST_VECTOR_ELT (op0, i)
2680 : CONST_VECTOR_ELT (op1, i));
2681 return gen_rtx_CONST_VECTOR (mode, v);
2684 break;
2686 default:
2687 abort ();
2690 return 0;
2693 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2694 Return 0 if no simplification is possible. */
2696 simplify_subreg (enum machine_mode outermode, rtx op,
2697 enum machine_mode innermode, unsigned int byte)
2699 /* Little bit of sanity checking. */
2700 if (innermode == VOIDmode || outermode == VOIDmode
2701 || innermode == BLKmode || outermode == BLKmode)
2702 abort ();
2704 if (GET_MODE (op) != innermode
2705 && GET_MODE (op) != VOIDmode)
2706 abort ();
2708 if (byte % GET_MODE_SIZE (outermode)
2709 || byte >= GET_MODE_SIZE (innermode))
2710 abort ();
2712 if (outermode == innermode && !byte)
2713 return op;
2715 /* Simplify subregs of vector constants. */
2716 if (GET_CODE (op) == CONST_VECTOR)
2718 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2719 const unsigned int offset = byte / elt_size;
2720 rtx elt;
2722 if (GET_MODE_INNER (innermode) == outermode)
2724 elt = CONST_VECTOR_ELT (op, offset);
2726 /* ??? We probably don't need this copy_rtx because constants
2727 can be shared. */
2729 return copy_rtx (elt);
2731 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2732 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2734 return (gen_rtx_CONST_VECTOR
2735 (outermode,
2736 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2737 &CONST_VECTOR_ELT (op, offset))));
2739 else if (GET_MODE_CLASS (outermode) == MODE_INT
2740 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2742 /* This happens when the target register size is smaller than
2743 the vector mode, and we synthesize operations with vectors
2744 of elements that are smaller than the register size. */
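/* E.g. for (subreg:SI (const_vector:V2HI [a b]) 0) the loop below
   accumulates (a << 16) | b on a big-endian target and
   (b << 16) | a on a little-endian one.  */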
2745 HOST_WIDE_INT sum = 0, high = 0;
2746 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2747 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2748 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2749 int shift = BITS_PER_UNIT * elt_size;
2750 unsigned HOST_WIDE_INT unit_mask;
2752 unit_mask = (unsigned HOST_WIDE_INT) -1
2753 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2755 for (; n_elts--; i += step)
2757 elt = CONST_VECTOR_ELT (op, i);
2758 if (GET_CODE (elt) == CONST_DOUBLE
2759 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2761 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2762 elt);
2763 if (! elt)
2764 return NULL_RTX;
2766 if (GET_CODE (elt) != CONST_INT)
2767 return NULL_RTX;
2768 /* Avoid overflow. */
2769 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2770 return NULL_RTX;
2771 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2772 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2774 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2775 return GEN_INT (trunc_int_for_mode (sum, outermode));
2776 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2777 return immed_double_const (sum, high, outermode);
2778 else
2779 return NULL_RTX;
2781 else if (GET_MODE_CLASS (outermode) == MODE_INT
2782 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2784 enum machine_mode new_mode
2785 = int_mode_for_mode (GET_MODE_INNER (innermode));
2786 int subbyte = byte % elt_size;
2788 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2789 if (! op)
2790 return NULL_RTX;
2791 return simplify_subreg (outermode, op, new_mode, subbyte);
2793 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2794 /* This shouldn't happen, but let's not do anything stupid. */
2795 return NULL_RTX;
2798 /* Attempt to simplify constant to non-SUBREG expression. */
2799 if (CONSTANT_P (op))
2801 int offset, part;
2802 unsigned HOST_WIDE_INT val = 0;
2804 if (VECTOR_MODE_P (outermode))
2806 /* Construct a CONST_VECTOR from individual subregs. */
2807 enum machine_mode submode = GET_MODE_INNER (outermode);
2808 int subsize = GET_MODE_UNIT_SIZE (outermode);
2809 int i, elts = GET_MODE_NUNITS (outermode);
2810 rtvec v = rtvec_alloc (elts);
2811 rtx elt;
2813 for (i = 0; i < elts; i++, byte += subsize)
2815 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2816 /* ??? It would be nice if we could actually make such subregs
2817 on targets that allow such relocations. */
2818 if (byte >= GET_MODE_SIZE (innermode))
2819 elt = CONST0_RTX (submode);
2820 else
2821 elt = simplify_subreg (submode, op, innermode, byte);
2822 if (! elt)
2823 return NULL_RTX;
2824 RTVEC_ELT (v, i) = elt;
2826 return gen_rtx_CONST_VECTOR (outermode, v);
2829 /* ??? This code is partly redundant with code below, but can handle
2830 the subregs of floats and similar corner cases.
2831 Later we should move all simplification code here and rewrite
2832 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2833 using SIMPLIFY_SUBREG. */
2834 if (subreg_lowpart_offset (outermode, innermode) == byte
2835 && GET_CODE (op) != CONST_VECTOR)
2837 rtx new = gen_lowpart_if_possible (outermode, op);
2838 if (new)
2839 return new;
2842 /* A similar comment to the one above applies here. */
2843 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2844 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2845 && GET_MODE_CLASS (outermode) == MODE_INT)
2847 rtx new = constant_subword (op,
2848 (byte / UNITS_PER_WORD),
2849 innermode);
2850 if (new)
2851 return new;
2854 if (GET_MODE_CLASS (outermode) != MODE_INT
2855 && GET_MODE_CLASS (outermode) != MODE_CC)
2857 enum machine_mode new_mode = int_mode_for_mode (outermode);
2859 if (new_mode != innermode || byte != 0)
2861 op = simplify_subreg (new_mode, op, innermode, byte);
2862 if (! op)
2863 return NULL_RTX;
2864 return simplify_subreg (outermode, op, new_mode, 0);
2868 offset = byte * BITS_PER_UNIT;
2869 switch (GET_CODE (op))
2871 case CONST_DOUBLE:
2872 if (GET_MODE (op) != VOIDmode)
2873 break;
2875 /* We can't handle this case yet. */
2876 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2877 return NULL_RTX;
2879 part = offset >= HOST_BITS_PER_WIDE_INT;
2880 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2881 && BYTES_BIG_ENDIAN)
2882 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2883 && WORDS_BIG_ENDIAN))
2884 part = !part;
2885 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2886 offset %= HOST_BITS_PER_WIDE_INT;
2888 /* We've already picked the word we want from a double, so
2889 pretend this is actually an integer. */
2890 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2892 /* FALLTHROUGH */
2893 case CONST_INT:
2894 if (GET_CODE (op) == CONST_INT)
2895 val = INTVAL (op);
2897 /* We don't handle synthesizing of non-integral constants yet. */
2898 if (GET_MODE_CLASS (outermode) != MODE_INT)
2899 return NULL_RTX;
2901 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2903 if (WORDS_BIG_ENDIAN)
2904 offset = (GET_MODE_BITSIZE (innermode)
2905 - GET_MODE_BITSIZE (outermode) - offset);
2906 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2907 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2908 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2909 - 2 * (offset % BITS_PER_WORD));
2912 if (offset >= HOST_BITS_PER_WIDE_INT)
2913 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2914 else
2916 val >>= offset;
2917 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2918 val = trunc_int_for_mode (val, outermode);
2919 return GEN_INT (val);
2921 default:
2922 break;
2926 /* Changing mode twice with SUBREG => just change it once,
2927 or not at all if changing back to the starting mode of OP. */
2928 if (GET_CODE (op) == SUBREG)
2930 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2931 int final_offset = byte + SUBREG_BYTE (op);
2932 rtx new;
2934 if (outermode == innermostmode
2935 && byte == 0 && SUBREG_BYTE (op) == 0)
2936 return SUBREG_REG (op);
2938 /* The SUBREG_BYTE represents the offset, as if the value were stored
2939 in memory. An irritating exception is the paradoxical subreg,
2940 where we define SUBREG_BYTE to be 0; on big-endian machines this
2941 value would properly be negative. For a moment, undo this exception. */
2942 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2944 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2945 if (WORDS_BIG_ENDIAN)
2946 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2947 if (BYTES_BIG_ENDIAN)
2948 final_offset += difference % UNITS_PER_WORD;
2950 if (SUBREG_BYTE (op) == 0
2951 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2953 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2954 if (WORDS_BIG_ENDIAN)
2955 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2956 if (BYTES_BIG_ENDIAN)
2957 final_offset += difference % UNITS_PER_WORD;
2960 /* See whether the resulting subreg will be paradoxical. */
2961 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2963 /* In nonparadoxical subregs we can't handle negative offsets. */
2964 if (final_offset < 0)
2965 return NULL_RTX;
2966 /* Bail out in case the resulting subreg would be incorrect. */
2967 if (final_offset % GET_MODE_SIZE (outermode)
2968 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2969 return NULL_RTX;
2971 else
2973 int offset = 0;
2974 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2976 /* In a paradoxical subreg, see if we are still looking at the lower
2977 part. If so, our SUBREG_BYTE will be 0. */
2978 if (WORDS_BIG_ENDIAN)
2979 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2980 if (BYTES_BIG_ENDIAN)
2981 offset += difference % UNITS_PER_WORD;
2982 if (offset == final_offset)
2983 final_offset = 0;
2984 else
2985 return NULL_RTX;
2988 /* Recurse for further possible simplifications. */
2989 new = simplify_subreg (outermode, SUBREG_REG (op),
2990 GET_MODE (SUBREG_REG (op)),
2991 final_offset);
2992 if (new)
2993 return new;
2994 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2997 /* SUBREG of a hard register => just change the register number
2998 and/or mode. If the hard register is not valid in that mode,
2999 suppress this simplification. If the hard register is the stack,
3000 frame, or argument pointer, leave this as a SUBREG. */
3002 if (REG_P (op)
3003 && (! REG_FUNCTION_VALUE_P (op)
3004 || ! rtx_equal_function_value_matters)
3005 && REGNO (op) < FIRST_PSEUDO_REGISTER
3006 #ifdef CANNOT_CHANGE_MODE_CLASS
3007 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3008 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3009 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3010 #endif
3011 && ((reload_completed && !frame_pointer_needed)
3012 || (REGNO (op) != FRAME_POINTER_REGNUM
3013 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3014 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3015 #endif
3017 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3018 && REGNO (op) != ARG_POINTER_REGNUM
3019 #endif
3020 && REGNO (op) != STACK_POINTER_REGNUM
3021 && subreg_offset_representable_p (REGNO (op), innermode,
3022 byte, outermode))
3024 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3025 int final_regno = subreg_hard_regno (tem, 0);
3027 /* ??? We do allow it if the current REG is not valid for
3028 its mode. This is a kludge to work around how float/complex
3029 arguments are passed on 32-bit SPARC and should be fixed. */
3030 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3031 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3033 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3035 /* Propagate the original regno. We don't have any way to specify
3036 the offset inside the original regno, so do so only for the lowpart.
3037 The information is used only by alias analysis, which cannot
3038 grok a partial register anyway. */
3040 if (subreg_lowpart_offset (outermode, innermode) == byte)
3041 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3042 return x;
3046 /* If we have a SUBREG of a register that we are replacing and we are
3047 replacing it with a MEM, make a new MEM and try replacing the
3048 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3049 or if we would be widening it. */
3051 if (GET_CODE (op) == MEM
3052 && ! mode_dependent_address_p (XEXP (op, 0))
3053 /* Allow splitting of volatile memory references in case we don't
3054 have an instruction to move the whole thing. */
3055 && (! MEM_VOLATILE_P (op)
3056 || ! have_insn_for (SET, innermode))
3057 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3058 return adjust_address_nv (op, outermode, byte);
3060 /* Handle complex values represented as CONCAT
3061 of real and imaginary part. */
3062 if (GET_CODE (op) == CONCAT)
3064 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
3065 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3066 unsigned int final_offset;
3067 rtx res;
3069 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3070 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3071 if (res)
3072 return res;
3073 /* We can at least simplify it by referring directly to the relevant part. */
3074 return gen_rtx_SUBREG (outermode, part, final_offset);
3077 return NULL_RTX;
3079 /* Make a SUBREG operation or equivalent if it folds. */
3082 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3083 enum machine_mode innermode, unsigned int byte)
3085 rtx new;
3086 /* Little bit of sanity checking. */
3087 if (innermode == VOIDmode || outermode == VOIDmode
3088 || innermode == BLKmode || outermode == BLKmode)
3089 abort ();
3091 if (GET_MODE (op) != innermode
3092 && GET_MODE (op) != VOIDmode)
3093 abort ();
3095 if (byte % GET_MODE_SIZE (outermode)
3096 || byte >= GET_MODE_SIZE (innermode))
3097 abort ();
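/* A QUEUED rtx stands for a value whose computation has been
   deferred (e.g. a pending postincrement); we cannot sensibly take
   a SUBREG of one.  */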
3099 if (GET_CODE (op) == QUEUED)
3100 return NULL_RTX;
3102 new = simplify_subreg (outermode, op, innermode, byte);
3103 if (new)
3104 return new;
3106 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3107 return NULL_RTX;
3109 return gen_rtx_SUBREG (outermode, op, byte);
3111 /* Simplify X, an rtx expression.
3113 Return the simplified expression or NULL if no simplifications
3114 were possible.
3116 This is the preferred entry point into the simplification routines;
3117 however, we still allow passes to call the more specific routines.
3119 Right now GCC has three (yes, three) major bodies of RTL simplification
3120 code that need to be unified.
3122 1. fold_rtx in cse.c. This code uses various CSE specific
3123 information to aid in RTL simplification.
3125 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3126 it uses combine specific information to aid in RTL
3127 simplification.
3129 3. The routines in this file.
3132 Long term we want to only have one body of simplification code; to
3133 get to that state I recommend the following steps:
3135 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3136 which are not pass dependent state into these routines.
3138 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3139 use this routine whenever possible.
3141 3. Allow for pass dependent state to be provided to these
3142 routines and add simplifications based on the pass dependent
3143 state. Remove code from cse.c & combine.c that becomes
3144 redundant/dead.
3146 It will take time, but ultimately the compiler will be easier to
3147 maintain and improve. It's totally silly that when we add a
3148 simplification it needs to be added to 4 places (3 for RTL
3149 simplification and 1 for tree simplification). */
3152 simplify_rtx (rtx x)
3154 enum rtx_code code = GET_CODE (x);
3155 enum machine_mode mode = GET_MODE (x);
3156 rtx temp;
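/* Dispatch on the general rtx class of the code: '1' unary, 'c'
   commutative binary, '2' other binary, '3' and 'b' ternary and
   bit-field operations, '<' relational, and 'x'/'o' miscellaneous
   codes such as SUBREG and LO_SUM.  */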
3158 switch (GET_RTX_CLASS (code))
3160 case '1':
3161 return simplify_unary_operation (code, mode,
3162 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3163 case 'c':
3164 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3165 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3167 /* Fall through.... */
3169 case '2':
3170 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3172 case '3':
3173 case 'b':
3174 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3175 XEXP (x, 0), XEXP (x, 1),
3176 XEXP (x, 2));
3178 case '<':
3179 temp = simplify_relational_operation (code,
3180 ((GET_MODE (XEXP (x, 0))
3181 != VOIDmode)
3182 ? GET_MODE (XEXP (x, 0))
3183 : GET_MODE (XEXP (x, 1))),
3184 XEXP (x, 0), XEXP (x, 1));
3185 #ifdef FLOAT_STORE_FLAG_VALUE
3186 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3188 if (temp == const0_rtx)
3189 temp = CONST0_RTX (mode);
3190 else
3191 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3192 mode);
3194 #endif
3195 return temp;
3197 case 'x':
3198 if (code == SUBREG)
3199 return simplify_gen_subreg (mode, SUBREG_REG (x),
3200 GET_MODE (SUBREG_REG (x)),
3201 SUBREG_BYTE (x));
3202 if (code == CONSTANT_P_RTX)
3204 if (CONSTANT_P (XEXP (x, 0)))
3205 return const1_rtx;
3207 break;
3209 case 'o':
3210 if (code == LO_SUM)
3212 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3213 if (GET_CODE (XEXP (x, 0)) == HIGH
3214 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3215 return XEXP (x, 1);
3217 break;
3219 default:
3220 break;
3222 return NULL;