1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
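/* For illustration: on a host where HOST_WIDE_INT is 64 bits,
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is -1 and HWI_SIGN_EXTEND (7) is 0,
   i.e. the value the high word must take when the pair (low, high)
   represents the sign extension of LOW.  */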
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
59 rtx, rtx);
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
63 /* Negate a CONST_INT rtx, truncating (because a conversion from a
64 maximally negative number can overflow). */
65 static rtx
66 neg_const_int (enum machine_mode mode, rtx i)
68 return gen_int_mode (- INTVAL (i), mode);
71 /* Test whether expression, X, is an immediate constant that represents
72 the most significant bit of machine mode MODE. */
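/* For example, in SImode this is true only for the constant whose sole
   set bit is bit 31 (1 << 31); any other constant, or a non-integer
   mode, yields false.  */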
74 bool
75 mode_signbit_p (enum machine_mode mode, rtx x)
77 unsigned HOST_WIDE_INT val;
78 unsigned int width;
80 if (GET_MODE_CLASS (mode) != MODE_INT)
81 return false;
83 width = GET_MODE_BITSIZE (mode);
84 if (width == 0)
85 return false;
87 if (width <= HOST_BITS_PER_WIDE_INT
88 && GET_CODE (x) == CONST_INT)
89 val = INTVAL (x);
90 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_DOUBLE
92 && CONST_DOUBLE_LOW (x) == 0)
94 val = CONST_DOUBLE_HIGH (x);
95 width -= HOST_BITS_PER_WIDE_INT;
97 else
98 return false;
100 if (width < HOST_BITS_PER_WIDE_INT)
101 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
102 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
105 /* Make a binary operation by properly ordering the operands and
106 seeing if the expression folds. */
109 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
110 rtx op1)
112 rtx tem;
114 /* Put complex operands first and constants second if commutative. */
115 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
116 && swap_commutative_operands_p (op0, op1))
117 tem = op0, op0 = op1, op1 = tem;
119 /* If this simplifies, do it. */
120 tem = simplify_binary_operation (code, mode, op0, op1);
121 if (tem)
122 return tem;
124 /* Handle addition and subtraction specially. Otherwise, just form
125 the operation. */
127 if (code == PLUS || code == MINUS)
129 tem = simplify_plus_minus (code, mode, op0, op1, 1);
130 if (tem)
131 return tem;
134 return gen_rtx_fmt_ee (code, mode, op0, op1);
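/* Illustrative example (operands are hypothetical): given CODE == PLUS,
   OP0 == (const_int 3) and OP1 == (reg X), the operands are first swapped
   so the constant comes second; simplify_binary_operation then gets a
   chance to fold, and only if it fails is a fresh
   (plus (reg X) (const_int 3)) generated.  */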
137 /* If X is a MEM referencing the constant pool, return the real value.
138 Otherwise return X. */
140 avoid_constant_pool_reference (rtx x)
142 rtx c, tmp, addr;
143 enum machine_mode cmode;
145 switch (GET_CODE (x))
147 case MEM:
148 break;
150 case FLOAT_EXTEND:
151 /* Handle float extensions of constant pool references. */
152 tmp = XEXP (x, 0);
153 c = avoid_constant_pool_reference (tmp);
154 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
156 REAL_VALUE_TYPE d;
158 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
159 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
161 return x;
163 default:
164 return x;
167 addr = XEXP (x, 0);
169 /* Call target hook to avoid the effects of -fpic etc.... */
170 addr = targetm.delegitimize_address (addr);
172 if (GET_CODE (addr) == LO_SUM)
173 addr = XEXP (addr, 1);
175 if (GET_CODE (addr) != SYMBOL_REF
176 || ! CONSTANT_POOL_ADDRESS_P (addr))
177 return x;
179 c = get_pool_constant (addr);
180 cmode = get_pool_mode (addr);
182 /* If we're accessing the constant in a different mode than it was
183 originally stored, attempt to fix that up via subreg simplifications.
184 If that fails we have no choice but to return the original memory. */
185 if (cmode != GET_MODE (x))
187 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
188 return c ? c : x;
191 return c;
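/* For example, if X is (mem (symbol_ref ...)) addressing a constant pool
   entry that holds (const_double ... 1.0) in the same mode as X, the
   CONST_DOUBLE itself is returned; anything that is not a constant pool
   reference comes back unchanged.  */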
194 /* Make a unary operation by first seeing if it folds and otherwise making
195 the specified operation. */
198 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
199 enum machine_mode op_mode)
201 rtx tem;
203 /* If this simplifies, use it. */
204 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
205 return tem;
207 return gen_rtx_fmt_e (code, mode, op);
210 /* Likewise for ternary operations. */
213 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
214 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
216 rtx tem;
218 /* If this simplifies, use it. */
219 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
220 op0, op1, op2)))
221 return tem;
223 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
226 /* Likewise, for relational operations.
227 CMP_MODE specifies mode comparison is done in. */
230 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
231 enum machine_mode cmp_mode, rtx op0, rtx op1)
233 rtx tem;
235 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
236 op0, op1)))
237 return tem;
239 return gen_rtx_fmt_ee (code, mode, op0, op1);
242 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
243 resulting RTX. Return a new RTX which is as simplified as possible. */
246 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
248 enum rtx_code code = GET_CODE (x);
249 enum machine_mode mode = GET_MODE (x);
250 enum machine_mode op_mode;
251 rtx op0, op1, op2;
253 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
254 to build a new expression substituting recursively. If we can't do
255 anything, return our input. */
257 if (x == old_rtx)
258 return new_rtx;
260 switch (GET_RTX_CLASS (code))
262 case RTX_UNARY:
263 op0 = XEXP (x, 0);
264 op_mode = GET_MODE (op0);
265 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
266 if (op0 == XEXP (x, 0))
267 return x;
268 return simplify_gen_unary (code, mode, op0, op_mode);
270 case RTX_BIN_ARITH:
271 case RTX_COMM_ARITH:
272 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
273 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
274 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
275 return x;
276 return simplify_gen_binary (code, mode, op0, op1);
278 case RTX_COMPARE:
279 case RTX_COMM_COMPARE:
280 op0 = XEXP (x, 0);
281 op1 = XEXP (x, 1);
282 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
283 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
284 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
285 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
286 return x;
287 return simplify_gen_relational (code, mode, op_mode, op0, op1);
289 case RTX_TERNARY:
290 case RTX_BITFIELD_OPS:
291 op0 = XEXP (x, 0);
292 op_mode = GET_MODE (op0);
293 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
294 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
295 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
296 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
297 return x;
298 if (op_mode == VOIDmode)
299 op_mode = GET_MODE (op0);
300 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
302 case RTX_EXTRA:
303 /* The only case we try to handle is a SUBREG. */
304 if (code == SUBREG)
306 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
307 if (op0 == SUBREG_REG (x))
308 return x;
309 op0 = simplify_gen_subreg (GET_MODE (x), op0,
310 GET_MODE (SUBREG_REG (x)),
311 SUBREG_BYTE (x));
312 return op0 ? op0 : x;
314 break;
316 case RTX_OBJ:
317 if (code == MEM)
319 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
320 if (op0 == XEXP (x, 0))
321 return x;
322 return replace_equiv_address_nv (x, op0);
324 else if (code == LO_SUM)
326 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
327 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
329 /* (lo_sum (high x) x) -> x */
330 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
331 return op1;
333 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
334 return x;
335 return gen_rtx_LO_SUM (mode, op0, op1);
337 else if (code == REG)
339 if (REG_P (old_rtx) && REGNO (x) == REGNO (old_rtx))
340 return new_rtx;
342 break;
344 default:
345 break;
347 return x;
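/* Illustrative example (register numbers are hypothetical): replacing
   (reg 100) with (const_int 0) in (plus:SI (reg 100) (reg 101)) rebuilds
   the PLUS through simplify_gen_binary, which folds the addition of zero
   and yields just (reg 101).  */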
350 /* Try to simplify a unary operation CODE whose output mode is to be
351 MODE with input operand OP whose mode was originally OP_MODE.
352 Return zero if no simplification can be made. */
354 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
355 rtx op, enum machine_mode op_mode)
357 unsigned int width = GET_MODE_BITSIZE (mode);
358 rtx trueop = avoid_constant_pool_reference (op);
360 if (code == VEC_DUPLICATE)
362 gcc_assert (VECTOR_MODE_P (mode));
363 if (GET_MODE (trueop) != VOIDmode)
365 if (!VECTOR_MODE_P (GET_MODE (trueop)))
366 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
367 else
368 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
369 (GET_MODE (trueop)));
371 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
372 || GET_CODE (trueop) == CONST_VECTOR)
374 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
375 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
376 rtvec v = rtvec_alloc (n_elts);
377 unsigned int i;
379 if (GET_CODE (trueop) != CONST_VECTOR)
380 for (i = 0; i < n_elts; i++)
381 RTVEC_ELT (v, i) = trueop;
382 else
384 enum machine_mode inmode = GET_MODE (trueop);
385 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
386 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
388 gcc_assert (in_n_elts < n_elts);
389 gcc_assert ((n_elts % in_n_elts) == 0);
390 for (i = 0; i < n_elts; i++)
391 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
393 return gen_rtx_CONST_VECTOR (mode, v);
396 else if (GET_CODE (op) == CONST)
397 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
399 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
401 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
402 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
403 enum machine_mode opmode = GET_MODE (trueop);
404 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
405 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
406 rtvec v = rtvec_alloc (n_elts);
407 unsigned int i;
409 gcc_assert (op_n_elts == n_elts);
410 for (i = 0; i < n_elts; i++)
412 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
413 CONST_VECTOR_ELT (trueop, i),
414 GET_MODE_INNER (opmode));
415 if (!x)
416 return 0;
417 RTVEC_ELT (v, i) = x;
419 return gen_rtx_CONST_VECTOR (mode, v);
422 /* The order of these tests is critical so that, for example, we don't
423 check the wrong mode (input vs. output) for a conversion operation,
424 such as FIX. At some point, this should be simplified. */
426 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
427 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
429 HOST_WIDE_INT hv, lv;
430 REAL_VALUE_TYPE d;
432 if (GET_CODE (trueop) == CONST_INT)
433 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
434 else
435 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
437 REAL_VALUE_FROM_INT (d, lv, hv, mode);
438 d = real_value_truncate (mode, d);
439 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
441 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
442 && (GET_CODE (trueop) == CONST_DOUBLE
443 || GET_CODE (trueop) == CONST_INT))
445 HOST_WIDE_INT hv, lv;
446 REAL_VALUE_TYPE d;
448 if (GET_CODE (trueop) == CONST_INT)
449 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
450 else
451 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
453 if (op_mode == VOIDmode)
455 /* We don't know how to interpret negative-looking numbers in
456 this case, so don't try to fold those. */
457 if (hv < 0)
458 return 0;
460 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
 461 	;
 462       else
463 hv = 0, lv &= GET_MODE_MASK (op_mode);
465 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
466 d = real_value_truncate (mode, d);
467 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
470 if (GET_CODE (trueop) == CONST_INT
471 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
473 HOST_WIDE_INT arg0 = INTVAL (trueop);
474 HOST_WIDE_INT val;
476 switch (code)
478 case NOT:
479 val = ~ arg0;
480 break;
482 case NEG:
483 val = - arg0;
484 break;
486 case ABS:
487 val = (arg0 >= 0 ? arg0 : - arg0);
488 break;
490 case FFS:
491 /* Don't use ffs here. Instead, get low order bit and then its
492 number. If arg0 is zero, this will return 0, as desired. */
493 arg0 &= GET_MODE_MASK (mode);
494 val = exact_log2 (arg0 & (- arg0)) + 1;
495 break;
497 case CLZ:
498 arg0 &= GET_MODE_MASK (mode);
499 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
 500 	  ;
 501 	else
502 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
503 break;
505 case CTZ:
506 arg0 &= GET_MODE_MASK (mode);
507 if (arg0 == 0)
509 /* Even if the value at zero is undefined, we have to come
510 up with some replacement. Seems good enough. */
511 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
512 val = GET_MODE_BITSIZE (mode);
514 else
515 val = exact_log2 (arg0 & -arg0);
516 break;
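	/* For POPCOUNT and PARITY below, "arg0 &= arg0 - 1" clears the
	   lowest set bit on each iteration, so the loop body runs once
	   per set bit.  */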
518 case POPCOUNT:
519 arg0 &= GET_MODE_MASK (mode);
520 val = 0;
521 while (arg0)
522 val++, arg0 &= arg0 - 1;
523 break;
525 case PARITY:
526 arg0 &= GET_MODE_MASK (mode);
527 val = 0;
528 while (arg0)
529 val++, arg0 &= arg0 - 1;
530 val &= 1;
531 break;
533 case TRUNCATE:
534 val = arg0;
535 break;
537 case ZERO_EXTEND:
538 /* When zero-extending a CONST_INT, we need to know its
539 original mode. */
540 gcc_assert (op_mode != VOIDmode);
541 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
543 /* If we were really extending the mode,
544 we would have to distinguish between zero-extension
545 and sign-extension. */
546 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
547 val = arg0;
549 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
550 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
551 else
552 return 0;
553 break;
555 case SIGN_EXTEND:
556 if (op_mode == VOIDmode)
557 op_mode = mode;
558 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
560 /* If we were really extending the mode,
561 we would have to distinguish between zero-extension
562 and sign-extension. */
563 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
564 val = arg0;
566 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
 568 	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
570 if (val
571 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
572 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
574 else
575 return 0;
576 break;
578 case SQRT:
579 case FLOAT_EXTEND:
580 case FLOAT_TRUNCATE:
581 case SS_TRUNCATE:
582 case US_TRUNCATE:
583 return 0;
585 default:
586 gcc_unreachable ();
589 val = trunc_int_for_mode (val, mode);
591 return GEN_INT (val);
594 /* We can do some operations on integer CONST_DOUBLEs. Also allow
595 for a DImode operation on a CONST_INT. */
596 else if (GET_MODE (trueop) == VOIDmode
597 && width <= HOST_BITS_PER_WIDE_INT * 2
598 && (GET_CODE (trueop) == CONST_DOUBLE
599 || GET_CODE (trueop) == CONST_INT))
601 unsigned HOST_WIDE_INT l1, lv;
602 HOST_WIDE_INT h1, hv;
604 if (GET_CODE (trueop) == CONST_DOUBLE)
605 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
606 else
607 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
609 switch (code)
611 case NOT:
612 lv = ~ l1;
613 hv = ~ h1;
614 break;
616 case NEG:
617 neg_double (l1, h1, &lv, &hv);
618 break;
620 case ABS:
621 if (h1 < 0)
622 neg_double (l1, h1, &lv, &hv);
623 else
624 lv = l1, hv = h1;
625 break;
627 case FFS:
628 hv = 0;
629 if (l1 == 0)
631 if (h1 == 0)
632 lv = 0;
633 else
634 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
636 else
637 lv = exact_log2 (l1 & -l1) + 1;
638 break;
640 case CLZ:
641 hv = 0;
642 if (h1 != 0)
643 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
644 - HOST_BITS_PER_WIDE_INT;
645 else if (l1 != 0)
646 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
647 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
648 lv = GET_MODE_BITSIZE (mode);
649 break;
651 case CTZ:
652 hv = 0;
653 if (l1 != 0)
654 lv = exact_log2 (l1 & -l1);
655 else if (h1 != 0)
656 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
657 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
658 lv = GET_MODE_BITSIZE (mode);
659 break;
661 case POPCOUNT:
662 hv = 0;
663 lv = 0;
664 while (l1)
665 lv++, l1 &= l1 - 1;
666 while (h1)
667 lv++, h1 &= h1 - 1;
668 break;
670 case PARITY:
671 hv = 0;
672 lv = 0;
673 while (l1)
674 lv++, l1 &= l1 - 1;
675 while (h1)
676 lv++, h1 &= h1 - 1;
677 lv &= 1;
678 break;
680 case TRUNCATE:
681 /* This is just a change-of-mode, so do nothing. */
682 lv = l1, hv = h1;
683 break;
685 case ZERO_EXTEND:
686 gcc_assert (op_mode != VOIDmode);
688 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
689 return 0;
691 hv = 0;
692 lv = l1 & GET_MODE_MASK (op_mode);
693 break;
695 case SIGN_EXTEND:
696 if (op_mode == VOIDmode
697 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
698 return 0;
699 else
701 lv = l1 & GET_MODE_MASK (op_mode);
702 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
703 && (lv & ((HOST_WIDE_INT) 1
704 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
705 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
707 hv = HWI_SIGN_EXTEND (lv);
709 break;
711 case SQRT:
712 return 0;
714 default:
715 return 0;
718 return immed_double_const (lv, hv, mode);
721 else if (GET_CODE (trueop) == CONST_DOUBLE
722 && GET_MODE_CLASS (mode) == MODE_FLOAT)
724 REAL_VALUE_TYPE d, t;
725 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
727 switch (code)
729 case SQRT:
730 if (HONOR_SNANS (mode) && real_isnan (&d))
731 return 0;
732 real_sqrt (&t, mode, &d);
733 d = t;
734 break;
735 case ABS:
736 d = REAL_VALUE_ABS (d);
737 break;
738 case NEG:
739 d = REAL_VALUE_NEGATE (d);
740 break;
741 case FLOAT_TRUNCATE:
742 d = real_value_truncate (mode, d);
743 break;
744 case FLOAT_EXTEND:
745 /* All this does is change the mode. */
746 break;
747 case FIX:
748 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
749 break;
750 case NOT:
752 long tmp[4];
753 int i;
755 real_to_target (tmp, &d, GET_MODE (trueop));
756 for (i = 0; i < 4; i++)
757 tmp[i] = ~tmp[i];
758 real_from_target (&d, tmp, mode);
760 default:
761 gcc_unreachable ();
763 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
766 else if (GET_CODE (trueop) == CONST_DOUBLE
767 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
768 && GET_MODE_CLASS (mode) == MODE_INT
769 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
771 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
772 operators are intentionally left unspecified (to ease implementation
773 by target backends), for consistency, this routine implements the
774 same semantics for constant folding as used by the middle-end. */
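      /* For example, folding (fix:SI ...) of a value above the signed
	 maximum saturates to 0x7fffffff, and a NaN operand folds to zero,
	 in line with the middle-end behavior mentioned above.  */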
776 HOST_WIDE_INT xh, xl, th, tl;
777 REAL_VALUE_TYPE x, t;
778 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
779 switch (code)
781 case FIX:
782 if (REAL_VALUE_ISNAN (x))
783 return const0_rtx;
785 /* Test against the signed upper bound. */
786 if (width > HOST_BITS_PER_WIDE_INT)
788 th = ((unsigned HOST_WIDE_INT) 1
789 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
790 tl = -1;
792 else
794 th = 0;
795 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
797 real_from_integer (&t, VOIDmode, tl, th, 0);
798 if (REAL_VALUES_LESS (t, x))
800 xh = th;
801 xl = tl;
802 break;
805 /* Test against the signed lower bound. */
806 if (width > HOST_BITS_PER_WIDE_INT)
808 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
809 tl = 0;
811 else
813 th = -1;
814 tl = (HOST_WIDE_INT) -1 << (width - 1);
816 real_from_integer (&t, VOIDmode, tl, th, 0);
817 if (REAL_VALUES_LESS (x, t))
819 xh = th;
820 xl = tl;
821 break;
823 REAL_VALUE_TO_INT (&xl, &xh, x);
824 break;
826 case UNSIGNED_FIX:
827 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
828 return const0_rtx;
830 /* Test against the unsigned upper bound. */
831 if (width == 2*HOST_BITS_PER_WIDE_INT)
833 th = -1;
834 tl = -1;
836 else if (width >= HOST_BITS_PER_WIDE_INT)
838 th = ((unsigned HOST_WIDE_INT) 1
839 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
840 tl = -1;
842 else
844 th = 0;
845 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
847 real_from_integer (&t, VOIDmode, tl, th, 1);
848 if (REAL_VALUES_LESS (t, x))
850 xh = th;
851 xl = tl;
852 break;
855 REAL_VALUE_TO_INT (&xl, &xh, x);
856 break;
858 default:
859 gcc_unreachable ();
861 return immed_double_const (xl, xh, mode);
864 /* This was formerly used only for non-IEEE float.
865 eggert@twinsun.com says it is safe for IEEE also. */
866 else
868 enum rtx_code reversed;
869 rtx temp;
871 /* There are some simplifications we can do even if the operands
872 aren't constant. */
873 switch (code)
875 case NOT:
876 /* (not (not X)) == X. */
877 if (GET_CODE (op) == NOT)
878 return XEXP (op, 0);
880 /* (not (eq X Y)) == (ne X Y), etc. */
881 if (COMPARISON_P (op)
882 && (mode == BImode || STORE_FLAG_VALUE == -1)
883 && ((reversed = reversed_comparison_code (op, NULL_RTX))
884 != UNKNOWN))
885 return simplify_gen_relational (reversed, mode, VOIDmode,
886 XEXP (op, 0), XEXP (op, 1));
888 /* (not (plus X -1)) can become (neg X). */
889 if (GET_CODE (op) == PLUS
890 && XEXP (op, 1) == constm1_rtx)
891 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
893 /* Similarly, (not (neg X)) is (plus X -1). */
894 if (GET_CODE (op) == NEG)
895 return plus_constant (XEXP (op, 0), -1);
897 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
898 if (GET_CODE (op) == XOR
899 && GET_CODE (XEXP (op, 1)) == CONST_INT
900 && (temp = simplify_unary_operation (NOT, mode,
901 XEXP (op, 1),
902 mode)) != 0)
903 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
905 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
906 if (GET_CODE (op) == PLUS
907 && GET_CODE (XEXP (op, 1)) == CONST_INT
908 && mode_signbit_p (mode, XEXP (op, 1))
909 && (temp = simplify_unary_operation (NOT, mode,
910 XEXP (op, 1),
911 mode)) != 0)
912 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
916 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
917 operands other than 1, but that is not valid. We could do a
918 similar simplification for (not (lshiftrt C X)) where C is
919 just the sign bit, but this doesn't seem common enough to
920 bother with. */
921 if (GET_CODE (op) == ASHIFT
922 && XEXP (op, 0) == const1_rtx)
924 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
925 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
928 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
929 by reversing the comparison code if valid. */
930 if (STORE_FLAG_VALUE == -1
931 && COMPARISON_P (op)
932 && (reversed = reversed_comparison_code (op, NULL_RTX))
933 != UNKNOWN)
934 return simplify_gen_relational (reversed, mode, VOIDmode,
935 XEXP (op, 0), XEXP (op, 1));
937 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
938 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
939 so we can perform the above simplification. */
941 if (STORE_FLAG_VALUE == -1
942 && GET_CODE (op) == ASHIFTRT
943 && GET_CODE (XEXP (op, 1)) == CONST_INT
944 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
945 return simplify_gen_relational (GE, mode, VOIDmode,
946 XEXP (op, 0), const0_rtx);
948 break;
950 case NEG:
951 /* (neg (neg X)) == X. */
952 if (GET_CODE (op) == NEG)
953 return XEXP (op, 0);
955 /* (neg (plus X 1)) can become (not X). */
956 if (GET_CODE (op) == PLUS
957 && XEXP (op, 1) == const1_rtx)
958 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
960 /* Similarly, (neg (not X)) is (plus X 1). */
961 if (GET_CODE (op) == NOT)
962 return plus_constant (XEXP (op, 0), 1);
964 /* (neg (minus X Y)) can become (minus Y X). This transformation
965 isn't safe for modes with signed zeros, since if X and Y are
966 both +0, (minus Y X) is the same as (minus X Y). If the
967 rounding mode is towards +infinity (or -infinity) then the two
968 expressions will be rounded differently. */
969 if (GET_CODE (op) == MINUS
970 && !HONOR_SIGNED_ZEROS (mode)
971 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
972 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
973 XEXP (op, 0));
975 if (GET_CODE (op) == PLUS
976 && !HONOR_SIGNED_ZEROS (mode)
977 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
979 /* (neg (plus A C)) is simplified to (minus -C A). */
980 if (GET_CODE (XEXP (op, 1)) == CONST_INT
981 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
983 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
984 mode);
985 if (temp)
986 return simplify_gen_binary (MINUS, mode, temp,
987 XEXP (op, 0));
990 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
991 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
992 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
995 /* (neg (mult A B)) becomes (mult (neg A) B).
996 This works even for floating-point values. */
997 if (GET_CODE (op) == MULT
998 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1000 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1001 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1004 /* NEG commutes with ASHIFT since it is multiplication. Only do
1005 this if we can then eliminate the NEG (e.g., if the operand
1006 is a constant). */
1007 if (GET_CODE (op) == ASHIFT)
1009 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1010 mode);
1011 if (temp)
1012 return simplify_gen_binary (ASHIFT, mode, temp,
1013 XEXP (op, 1));
1016 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1017 C is equal to the width of MODE minus 1. */
1018 if (GET_CODE (op) == ASHIFTRT
1019 && GET_CODE (XEXP (op, 1)) == CONST_INT
1020 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1021 return simplify_gen_binary (LSHIFTRT, mode,
1022 XEXP (op, 0), XEXP (op, 1));
1024 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1025 C is equal to the width of MODE minus 1. */
1026 if (GET_CODE (op) == LSHIFTRT
1027 && GET_CODE (XEXP (op, 1)) == CONST_INT
1028 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1029 return simplify_gen_binary (ASHIFTRT, mode,
1030 XEXP (op, 0), XEXP (op, 1));
1032 break;
1034 case SIGN_EXTEND:
1035 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1036 becomes just the MINUS if its mode is MODE. This allows
1037 folding switch statements on machines using casesi (such as
1038 the VAX). */
1039 if (GET_CODE (op) == TRUNCATE
1040 && GET_MODE (XEXP (op, 0)) == mode
1041 && GET_CODE (XEXP (op, 0)) == MINUS
1042 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1043 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1044 return XEXP (op, 0);
1046 /* Check for a sign extension of a subreg of a promoted
1047 variable, where the promotion is sign-extended, and the
1048 target mode is the same as the variable's promotion. */
1049 if (GET_CODE (op) == SUBREG
1050 && SUBREG_PROMOTED_VAR_P (op)
1051 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1052 && GET_MODE (XEXP (op, 0)) == mode)
1053 return XEXP (op, 0);
1055 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1056 if (! POINTERS_EXTEND_UNSIGNED
1057 && mode == Pmode && GET_MODE (op) == ptr_mode
1058 && (CONSTANT_P (op)
1059 || (GET_CODE (op) == SUBREG
1060 && REG_P (SUBREG_REG (op))
1061 && REG_POINTER (SUBREG_REG (op))
1062 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1063 return convert_memory_address (Pmode, op);
1064 #endif
1065 break;
1067 case ZERO_EXTEND:
1068 /* Check for a zero extension of a subreg of a promoted
1069 variable, where the promotion is zero-extended, and the
1070 target mode is the same as the variable's promotion. */
1071 if (GET_CODE (op) == SUBREG
1072 && SUBREG_PROMOTED_VAR_P (op)
1073 && SUBREG_PROMOTED_UNSIGNED_P (op)
1074 && GET_MODE (XEXP (op, 0)) == mode)
1075 return XEXP (op, 0);
1077 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1078 if (POINTERS_EXTEND_UNSIGNED > 0
1079 && mode == Pmode && GET_MODE (op) == ptr_mode
1080 && (CONSTANT_P (op)
1081 || (GET_CODE (op) == SUBREG
1082 && REG_P (SUBREG_REG (op))
1083 && REG_POINTER (SUBREG_REG (op))
1084 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1085 return convert_memory_address (Pmode, op);
1086 #endif
1087 break;
1089 default:
1090 break;
1093 return 0;
1097 /* Subroutine of simplify_binary_operation to simplify a commutative,
1098 associative binary operation CODE with result mode MODE, operating
1099 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1100 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1101 canonicalization is possible. */
1103 static rtx
1104 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1105 rtx op0, rtx op1)
1107 rtx tem;
1109 /* Linearize the operator to the left. */
1110 if (GET_CODE (op1) == code)
1112 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1113 if (GET_CODE (op0) == code)
1115 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1116 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1119 /* "a op (b op c)" becomes "(b op c) op a". */
1120 if (! swap_commutative_operands_p (op1, op0))
1121 return simplify_gen_binary (code, mode, op1, op0);
1123 tem = op0;
1124 op0 = op1;
1125 op1 = tem;
1128 if (GET_CODE (op0) == code)
1130 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1131 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1133 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1134 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1137 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1138 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1139 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1140 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1141 if (tem != 0)
1142 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1144 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1145 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1146 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1147 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1148 if (tem != 0)
1149 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1152 return 0;
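/* Illustrative example: with CODE == PLUS, OP0 == (plus x (const_int 3))
   and OP1 == (const_int 4), the "(a op b) op c" -> "a op (b op c)" attempt
   folds the two constants and the result is (plus x (const_int 7)).  */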
1155 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1156 and OP1. Return 0 if no simplification is possible.
1158 Don't use this for relational operations such as EQ or LT.
1159 Use simplify_relational_operation instead. */
1161 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1162 rtx op0, rtx op1)
1164 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1165 HOST_WIDE_INT val;
1166 unsigned int width = GET_MODE_BITSIZE (mode);
1167 rtx trueop0, trueop1;
1168 rtx tem;
1170 /* Relational operations don't work here. We must know the mode
1171 of the operands in order to do the comparison correctly.
1172 Assuming a full word can give incorrect results.
1173 Consider comparing 128 with -128 in QImode. */
1174 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1175 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1177 /* Make sure the constant is second. */
1178 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1179 && swap_commutative_operands_p (op0, op1))
1181 tem = op0, op0 = op1, op1 = tem;
1184 trueop0 = avoid_constant_pool_reference (op0);
1185 trueop1 = avoid_constant_pool_reference (op1);
1187 if (VECTOR_MODE_P (mode)
1188 && GET_CODE (trueop0) == CONST_VECTOR
1189 && GET_CODE (trueop1) == CONST_VECTOR)
1191 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1192 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1193 enum machine_mode op0mode = GET_MODE (trueop0);
1194 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1195 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1196 enum machine_mode op1mode = GET_MODE (trueop1);
1197 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1198 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1199 rtvec v = rtvec_alloc (n_elts);
1200 unsigned int i;
1202 gcc_assert (op0_n_elts == n_elts);
1203 gcc_assert (op1_n_elts == n_elts);
1204 for (i = 0; i < n_elts; i++)
1206 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1207 CONST_VECTOR_ELT (trueop0, i),
1208 CONST_VECTOR_ELT (trueop1, i));
1209 if (!x)
1210 return 0;
1211 RTVEC_ELT (v, i) = x;
1214 return gen_rtx_CONST_VECTOR (mode, v);
1217 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1218 && GET_CODE (trueop0) == CONST_DOUBLE
1219 && GET_CODE (trueop1) == CONST_DOUBLE
1220 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1222 if (code == AND
1223 || code == IOR
1224 || code == XOR)
1226 long tmp0[4];
1227 long tmp1[4];
1228 REAL_VALUE_TYPE r;
1229 int i;
1231 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1232 GET_MODE (op0));
1233 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1234 GET_MODE (op1));
1235 for (i = 0; i < 4; i++)
1237 switch (code)
1239 case AND:
1240 tmp0[i] &= tmp1[i];
1241 break;
1242 case IOR:
1243 tmp0[i] |= tmp1[i];
1244 break;
1245 case XOR:
1246 tmp0[i] ^= tmp1[i];
1247 break;
1248 default:
1249 gcc_unreachable ();
1252 real_from_target (&r, tmp0, mode);
1253 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1255 else
1257 REAL_VALUE_TYPE f0, f1, value;
1259 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1260 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1261 f0 = real_value_truncate (mode, f0);
1262 f1 = real_value_truncate (mode, f1);
1264 if (HONOR_SNANS (mode)
1265 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1266 return 0;
1268 if (code == DIV
1269 && REAL_VALUES_EQUAL (f1, dconst0)
1270 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1271 return 0;
1273 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1274 && flag_trapping_math
1275 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1277 int s0 = REAL_VALUE_NEGATIVE (f0);
1278 int s1 = REAL_VALUE_NEGATIVE (f1);
1280 switch (code)
1282 case PLUS:
1283 /* Inf + -Inf = NaN plus exception. */
1284 if (s0 != s1)
1285 return 0;
1286 break;
1287 case MINUS:
1288 /* Inf - Inf = NaN plus exception. */
1289 if (s0 == s1)
1290 return 0;
1291 break;
1292 case DIV:
1293 /* Inf / Inf = NaN plus exception. */
1294 return 0;
1295 default:
1296 break;
1300 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1301 && flag_trapping_math
1302 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1303 || (REAL_VALUE_ISINF (f1)
1304 && REAL_VALUES_EQUAL (f0, dconst0))))
1305 /* Inf * 0 = NaN plus exception. */
1306 return 0;
1308 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1310 value = real_value_truncate (mode, value);
1311 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1315 /* We can fold some multi-word operations. */
1316 if (GET_MODE_CLASS (mode) == MODE_INT
1317 && width == HOST_BITS_PER_WIDE_INT * 2
1318 && (GET_CODE (trueop0) == CONST_DOUBLE
1319 || GET_CODE (trueop0) == CONST_INT)
1320 && (GET_CODE (trueop1) == CONST_DOUBLE
1321 || GET_CODE (trueop1) == CONST_INT))
1323 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1324 HOST_WIDE_INT h1, h2, hv, ht;
1326 if (GET_CODE (trueop0) == CONST_DOUBLE)
1327 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1328 else
1329 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1331 if (GET_CODE (trueop1) == CONST_DOUBLE)
1332 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1333 else
1334 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1336 switch (code)
1338 case MINUS:
1339 /* A - B == A + (-B). */
1340 neg_double (l2, h2, &lv, &hv);
1341 l2 = lv, h2 = hv;
1343 /* Fall through.... */
1345 case PLUS:
1346 add_double (l1, h1, l2, h2, &lv, &hv);
1347 break;
1349 case MULT:
1350 mul_double (l1, h1, l2, h2, &lv, &hv);
1351 break;
1353 case DIV:
1354 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1355 &lv, &hv, &lt, &ht))
1356 return 0;
1357 break;
1359 case MOD:
1360 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1361 &lt, &ht, &lv, &hv))
1362 return 0;
1363 break;
1365 case UDIV:
1366 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1367 &lv, &hv, &lt, &ht))
1368 return 0;
1369 break;
1371 case UMOD:
1372 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1373 &lt, &ht, &lv, &hv))
1374 return 0;
1375 break;
1377 case AND:
1378 lv = l1 & l2, hv = h1 & h2;
1379 break;
1381 case IOR:
1382 lv = l1 | l2, hv = h1 | h2;
1383 break;
1385 case XOR:
1386 lv = l1 ^ l2, hv = h1 ^ h2;
1387 break;
1389 case SMIN:
1390 if (h1 < h2
1391 || (h1 == h2
1392 && ((unsigned HOST_WIDE_INT) l1
1393 < (unsigned HOST_WIDE_INT) l2)))
1394 lv = l1, hv = h1;
1395 else
1396 lv = l2, hv = h2;
1397 break;
1399 case SMAX:
1400 if (h1 > h2
1401 || (h1 == h2
1402 && ((unsigned HOST_WIDE_INT) l1
1403 > (unsigned HOST_WIDE_INT) l2)))
1404 lv = l1, hv = h1;
1405 else
1406 lv = l2, hv = h2;
1407 break;
1409 case UMIN:
1410 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1411 || (h1 == h2
1412 && ((unsigned HOST_WIDE_INT) l1
1413 < (unsigned HOST_WIDE_INT) l2)))
1414 lv = l1, hv = h1;
1415 else
1416 lv = l2, hv = h2;
1417 break;
1419 case UMAX:
1420 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1421 || (h1 == h2
1422 && ((unsigned HOST_WIDE_INT) l1
1423 > (unsigned HOST_WIDE_INT) l2)))
1424 lv = l1, hv = h1;
1425 else
1426 lv = l2, hv = h2;
1427 break;
1429 case LSHIFTRT: case ASHIFTRT:
1430 case ASHIFT:
1431 case ROTATE: case ROTATERT:
1432 if (SHIFT_COUNT_TRUNCATED)
1433 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1435 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1436 return 0;
1438 if (code == LSHIFTRT || code == ASHIFTRT)
1439 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1440 code == ASHIFTRT);
1441 else if (code == ASHIFT)
1442 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1443 else if (code == ROTATE)
1444 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1445 else /* code == ROTATERT */
1446 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1447 break;
1449 default:
1450 return 0;
1453 return immed_double_const (lv, hv, mode);
1456 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1457 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1459 /* Even if we can't compute a constant result,
1460 there are some cases worth simplifying. */
1462 switch (code)
1464 case PLUS:
1465 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1466 when x is NaN, infinite, or finite and nonzero. They aren't
1467 when x is -0 and the rounding mode is not towards -infinity,
1468 since (-0) + 0 is then 0. */
1469 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1470 return op0;
1472 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1473 transformations are safe even for IEEE. */
1474 if (GET_CODE (op0) == NEG)
1475 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1476 else if (GET_CODE (op1) == NEG)
1477 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1479 /* (~a) + 1 -> -a */
1480 if (INTEGRAL_MODE_P (mode)
1481 && GET_CODE (op0) == NOT
1482 && trueop1 == const1_rtx)
1483 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1485 /* Handle both-operands-constant cases. We can only add
1486 CONST_INTs to constants since the sum of relocatable symbols
1487 can't be handled by most assemblers. Don't add CONST_INT
1488 to CONST_INT since overflow won't be computed properly if wider
1489 than HOST_BITS_PER_WIDE_INT. */
1491 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1492 && GET_CODE (op1) == CONST_INT)
1493 return plus_constant (op0, INTVAL (op1));
1494 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1495 && GET_CODE (op0) == CONST_INT)
1496 return plus_constant (op1, INTVAL (op0));
1498 /* See if this is something like X * C - X or vice versa or
1499 if the multiplication is written as a shift. If so, we can
1500 distribute and make a new multiply, shift, or maybe just
1501 have X (if C is 2 in the example above). But don't make
1502 something more expensive than we had before. */
1504 if (! FLOAT_MODE_P (mode))
1506 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1507 rtx lhs = op0, rhs = op1;
1509 if (GET_CODE (lhs) == NEG)
1510 coeff0 = -1, lhs = XEXP (lhs, 0);
1511 else if (GET_CODE (lhs) == MULT
1512 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1514 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1516 else if (GET_CODE (lhs) == ASHIFT
1517 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1518 && INTVAL (XEXP (lhs, 1)) >= 0
1519 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1521 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1522 lhs = XEXP (lhs, 0);
1525 if (GET_CODE (rhs) == NEG)
1526 coeff1 = -1, rhs = XEXP (rhs, 0);
1527 else if (GET_CODE (rhs) == MULT
1528 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1530 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1532 else if (GET_CODE (rhs) == ASHIFT
1533 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1534 && INTVAL (XEXP (rhs, 1)) >= 0
1535 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1537 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1538 rhs = XEXP (rhs, 0);
1541 if (rtx_equal_p (lhs, rhs))
1543 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1544 tem = simplify_gen_binary (MULT, mode, lhs,
1545 GEN_INT (coeff0 + coeff1));
1546 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1547 ? tem : 0;
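	  /* For instance, (plus (mult x (const_int 3)) x) is rewritten
	     here as (mult x (const_int 4)) when that is no more expensive
	     than the original expression.  */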
1551 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1552 if ((GET_CODE (op1) == CONST_INT
1553 || GET_CODE (op1) == CONST_DOUBLE)
1554 && GET_CODE (op0) == XOR
1555 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1556 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1557 && mode_signbit_p (mode, op1))
1558 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1559 simplify_gen_binary (XOR, mode, op1,
1560 XEXP (op0, 1)));
1562 /* If one of the operands is a PLUS or a MINUS, see if we can
1563 simplify this by the associative law.
1564 Don't use the associative law for floating point.
1565 The inaccuracy makes it nonassociative,
1566 and subtle programs can break if operations are associated. */
1568 if (INTEGRAL_MODE_P (mode)
1569 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1570 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1571 || (GET_CODE (op0) == CONST
1572 && GET_CODE (XEXP (op0, 0)) == PLUS)
1573 || (GET_CODE (op1) == CONST
1574 && GET_CODE (XEXP (op1, 0)) == PLUS))
1575 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1576 return tem;
1578 /* Reassociate floating point addition only when the user
1579 specifies unsafe math optimizations. */
1580 if (FLOAT_MODE_P (mode)
1581 && flag_unsafe_math_optimizations)
1583 tem = simplify_associative_operation (code, mode, op0, op1);
1584 if (tem)
1585 return tem;
1587 break;
1589 case COMPARE:
1590 #ifdef HAVE_cc0
1591 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1592 using cc0, in which case we want to leave it as a COMPARE
1593 so we can distinguish it from a register-register-copy.
1595 In IEEE floating point, x-0 is not the same as x. */
1597 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1598 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1599 && trueop1 == CONST0_RTX (mode))
1600 return op0;
1601 #endif
1603 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1604 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1605 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1606 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1608 rtx xop00 = XEXP (op0, 0);
1609 rtx xop10 = XEXP (op1, 0);
1611 #ifdef HAVE_cc0
1612 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1613 #else
1614 if (REG_P (xop00) && REG_P (xop10)
1615 && GET_MODE (xop00) == GET_MODE (xop10)
1616 && REGNO (xop00) == REGNO (xop10)
1617 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1618 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1619 #endif
1620 return xop00;
1622 break;
1624 case MINUS:
1625 /* We can't assume x-x is 0 even with non-IEEE floating point,
1626 but since it is zero except in very strange circumstances, we
1627 will treat it as zero with -funsafe-math-optimizations. */
1628 if (rtx_equal_p (trueop0, trueop1)
1629 && ! side_effects_p (op0)
1630 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1631 return CONST0_RTX (mode);
1633 /* Change subtraction from zero into negation. (0 - x) is the
1634 same as -x when x is NaN, infinite, or finite and nonzero.
1635 But if the mode has signed zeros, and does not round towards
1636 -infinity, then 0 - 0 is 0, not -0. */
1637 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1638 return simplify_gen_unary (NEG, mode, op1, mode);
1640 /* (-1 - a) is ~a. */
1641 if (trueop0 == constm1_rtx)
1642 return simplify_gen_unary (NOT, mode, op1, mode);
1644 /* Subtracting 0 has no effect unless the mode has signed zeros
1645 and supports rounding towards -infinity. In such a case,
1646 0 - 0 is -0. */
1647 if (!(HONOR_SIGNED_ZEROS (mode)
1648 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1649 && trueop1 == CONST0_RTX (mode))
1650 return op0;
1652 /* See if this is something like X * C - X or vice versa or
1653 if the multiplication is written as a shift. If so, we can
1654 distribute and make a new multiply, shift, or maybe just
1655 have X (if C is 2 in the example above). But don't make
1656 something more expensive than we had before. */
1658 if (! FLOAT_MODE_P (mode))
1660 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1661 rtx lhs = op0, rhs = op1;
1663 if (GET_CODE (lhs) == NEG)
1664 coeff0 = -1, lhs = XEXP (lhs, 0);
1665 else if (GET_CODE (lhs) == MULT
1666 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1668 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1670 else if (GET_CODE (lhs) == ASHIFT
1671 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1672 && INTVAL (XEXP (lhs, 1)) >= 0
1673 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1675 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1676 lhs = XEXP (lhs, 0);
1679 if (GET_CODE (rhs) == NEG)
1680 coeff1 = - 1, rhs = XEXP (rhs, 0);
1681 else if (GET_CODE (rhs) == MULT
1682 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1684 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1686 else if (GET_CODE (rhs) == ASHIFT
1687 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1688 && INTVAL (XEXP (rhs, 1)) >= 0
1689 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1691 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1692 rhs = XEXP (rhs, 0);
1695 if (rtx_equal_p (lhs, rhs))
1697 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1698 tem = simplify_gen_binary (MULT, mode, lhs,
1699 GEN_INT (coeff0 - coeff1));
1700 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1701 ? tem : 0;
1705 /* (a - (-b)) -> (a + b). True even for IEEE. */
1706 if (GET_CODE (op1) == NEG)
1707 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1709 /* (-x - c) may be simplified as (-c - x). */
1710 if (GET_CODE (op0) == NEG
1711 && (GET_CODE (op1) == CONST_INT
1712 || GET_CODE (op1) == CONST_DOUBLE))
1714 tem = simplify_unary_operation (NEG, mode, op1, mode);
1715 if (tem)
1716 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1719 /* If one of the operands is a PLUS or a MINUS, see if we can
1720 simplify this by the associative law.
1721 Don't use the associative law for floating point.
1722 The inaccuracy makes it nonassociative,
1723 and subtle programs can break if operations are associated. */
1725 if (INTEGRAL_MODE_P (mode)
1726 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1727 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1728 || (GET_CODE (op0) == CONST
1729 && GET_CODE (XEXP (op0, 0)) == PLUS)
1730 || (GET_CODE (op1) == CONST
1731 && GET_CODE (XEXP (op1, 0)) == PLUS))
1732 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1733 return tem;
1735 /* Don't let a relocatable value get a negative coeff. */
1736 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1737 return simplify_gen_binary (PLUS, mode,
1738 op0,
1739 neg_const_int (mode, op1));
1741 /* (x - (x & y)) -> (x & ~y) */
1742 if (GET_CODE (op1) == AND)
1744 if (rtx_equal_p (op0, XEXP (op1, 0)))
1746 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1747 GET_MODE (XEXP (op1, 1)));
1748 return simplify_gen_binary (AND, mode, op0, tem);
1750 if (rtx_equal_p (op0, XEXP (op1, 1)))
1752 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1753 GET_MODE (XEXP (op1, 0)));
1754 return simplify_gen_binary (AND, mode, op0, tem);
1757 break;
1759 case MULT:
1760 if (trueop1 == constm1_rtx)
1761 return simplify_gen_unary (NEG, mode, op0, mode);
1763 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1764 x is NaN, since x * 0 is then also NaN. Nor is it valid
1765 when the mode has signed zeros, since multiplying a negative
1766 number by 0 will give -0, not 0. */
1767 if (!HONOR_NANS (mode)
1768 && !HONOR_SIGNED_ZEROS (mode)
1769 && trueop1 == CONST0_RTX (mode)
1770 && ! side_effects_p (op0))
1771 return op1;
1773 /* In IEEE floating point, x*1 is not equivalent to x for
1774 signalling NaNs. */
1775 if (!HONOR_SNANS (mode)
1776 && trueop1 == CONST1_RTX (mode))
1777 return op0;
1779 /* Convert multiply by constant power of two into shift unless
1780 we are still generating RTL. This test is a kludge. */
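      /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)).  */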
1781 if (GET_CODE (trueop1) == CONST_INT
1782 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1783 /* If the mode is larger than the host word size, and the
1784 uppermost bit is set, then this isn't a power of two due
1785 to implicit sign extension. */
1786 && (width <= HOST_BITS_PER_WIDE_INT
1787 || val != HOST_BITS_PER_WIDE_INT - 1))
1788 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1790 /* x*2 is x+x and x*(-1) is -x */
1791 if (GET_CODE (trueop1) == CONST_DOUBLE
1792 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1793 && GET_MODE (op0) == mode)
1795 REAL_VALUE_TYPE d;
1796 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1798 if (REAL_VALUES_EQUAL (d, dconst2))
1799 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1801 if (REAL_VALUES_EQUAL (d, dconstm1))
1802 return simplify_gen_unary (NEG, mode, op0, mode);
1805 /* Reassociate multiplication, but for floating point MULTs
1806 only when the user specifies unsafe math optimizations. */
1807 if (! FLOAT_MODE_P (mode)
1808 || flag_unsafe_math_optimizations)
1810 tem = simplify_associative_operation (code, mode, op0, op1);
1811 if (tem)
1812 return tem;
1814 break;
1816 case IOR:
1817 if (trueop1 == const0_rtx)
1818 return op0;
1819 if (GET_CODE (trueop1) == CONST_INT
1820 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1821 == GET_MODE_MASK (mode)))
1822 return op1;
1823 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1824 return op0;
1825 /* A | (~A) -> -1 */
1826 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1827 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1828 && ! side_effects_p (op0)
1829 && GET_MODE_CLASS (mode) != MODE_CC)
1830 return constm1_rtx;
1831 tem = simplify_associative_operation (code, mode, op0, op1);
1832 if (tem)
1833 return tem;
1834 break;
1836 case XOR:
1837 if (trueop1 == const0_rtx)
1838 return op0;
1839 if (GET_CODE (trueop1) == CONST_INT
1840 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1841 == GET_MODE_MASK (mode)))
1842 return simplify_gen_unary (NOT, mode, op0, mode);
1843 if (trueop0 == trueop1
1844 && ! side_effects_p (op0)
1845 && GET_MODE_CLASS (mode) != MODE_CC)
1846 return const0_rtx;
1848 /* Canonicalize XOR of the most significant bit to PLUS. */
1849 if ((GET_CODE (op1) == CONST_INT
1850 || GET_CODE (op1) == CONST_DOUBLE)
1851 && mode_signbit_p (mode, op1))
1852 return simplify_gen_binary (PLUS, mode, op0, op1);
1853 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1854 if ((GET_CODE (op1) == CONST_INT
1855 || GET_CODE (op1) == CONST_DOUBLE)
1856 && GET_CODE (op0) == PLUS
1857 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1858 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1859 && mode_signbit_p (mode, XEXP (op0, 1)))
1860 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1861 simplify_gen_binary (XOR, mode, op1,
1862 XEXP (op0, 1)));
1864 tem = simplify_associative_operation (code, mode, op0, op1);
1865 if (tem)
1866 return tem;
1867 break;
1869 case AND:
1870 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1871 return const0_rtx;
1872 /* If we are turning off bits already known off in OP0, we need
1873 not do an AND. */
1874 if (GET_CODE (trueop1) == CONST_INT
1875 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1876 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
1877 return op0;
1878 if (trueop0 == trueop1 && ! side_effects_p (op0)
1879 && GET_MODE_CLASS (mode) != MODE_CC)
1880 return op0;
1881 /* A & (~A) -> 0 */
1882 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1883 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1884 && ! side_effects_p (op0)
1885 && GET_MODE_CLASS (mode) != MODE_CC)
1886 return const0_rtx;
1887 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
1888 ((A & N) + B) & M -> (A + B) & M
1889 Similarly if (N & M) == 0,
1890 ((A | N) + B) & M -> (A + B) & M
1891 and for - instead of + and/or ^ instead of |. */
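      /* Concretely, with M == 0xff this turns
	 (and (plus (and A (const_int 0xff)) B) (const_int 0xff)) into
	 (and (plus A B) (const_int 0xff)).  */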
1892 if (GET_CODE (trueop1) == CONST_INT
1893 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1894 && ~INTVAL (trueop1)
1895 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
1896 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
1898 rtx pmop[2];
1899 int which;
1901 pmop[0] = XEXP (op0, 0);
1902 pmop[1] = XEXP (op0, 1);
1904 for (which = 0; which < 2; which++)
1906 tem = pmop[which];
1907 switch (GET_CODE (tem))
1909 case AND:
1910 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1911 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
1912 == INTVAL (trueop1))
1913 pmop[which] = XEXP (tem, 0);
1914 break;
1915 case IOR:
1916 case XOR:
1917 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
1918 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
1919 pmop[which] = XEXP (tem, 0);
1920 break;
1921 default:
1922 break;
1926 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
1928 tem = simplify_gen_binary (GET_CODE (op0), mode,
1929 pmop[0], pmop[1]);
1930 return simplify_gen_binary (code, mode, tem, op1);
1933 tem = simplify_associative_operation (code, mode, op0, op1);
1934 if (tem)
1935 return tem;
1936 break;
1938 case UDIV:
1939 /* 0/x is 0 (or x&0 if x has side-effects). */
1940 if (trueop0 == const0_rtx)
1941 return side_effects_p (op1)
1942 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1943 : const0_rtx;
1944 /* x/1 is x. */
1945 if (trueop1 == const1_rtx)
1947 /* Handle narrowing UDIV. */
1948 rtx x = gen_lowpart_common (mode, op0);
1949 if (x)
1950 return x;
1951 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1952 return gen_lowpart_SUBREG (mode, op0);
1953 return op0;
1955 /* Convert divide by power of two into shift. */
1956 if (GET_CODE (trueop1) == CONST_INT
1957 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1958 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1959 break;
1961 case DIV:
1962 /* Handle floating point and integers separately. */
1963 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1965 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1966 safe for modes with NaNs, since 0.0 / 0.0 will then be
1967 NaN rather than 0.0. Nor is it safe for modes with signed
1968 zeros, since dividing 0 by a negative number gives -0.0 */
1969 if (trueop0 == CONST0_RTX (mode)
1970 && !HONOR_NANS (mode)
1971 && !HONOR_SIGNED_ZEROS (mode)
1972 && ! side_effects_p (op1))
1973 return op0;
1974 /* x/1.0 is x. */
1975 if (trueop1 == CONST1_RTX (mode)
1976 && !HONOR_SNANS (mode))
1977 return op0;
1979 if (GET_CODE (trueop1) == CONST_DOUBLE
1980 && trueop1 != CONST0_RTX (mode))
1982 REAL_VALUE_TYPE d;
1983 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1985 /* x/-1.0 is -x. */
1986 if (REAL_VALUES_EQUAL (d, dconstm1)
1987 && !HONOR_SNANS (mode))
1988 return simplify_gen_unary (NEG, mode, op0, mode);
1990 /* Change FP division by a constant into multiplication.
1991 Only do this with -funsafe-math-optimizations. */
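/* For example, x/4.0 becomes x*0.25.  The reciprocal is not exactly
   representable for every constant, which is why this transformation
   is guarded by -funsafe-math-optimizations.  */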
1992 if (flag_unsafe_math_optimizations
1993 && !REAL_VALUES_EQUAL (d, dconst0))
1995 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1996 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1997 return simplify_gen_binary (MULT, mode, op0, tem);
2001 else
2003 /* 0/x is 0 (or x&0 if x has side-effects). */
2004 if (trueop0 == const0_rtx)
2005 return side_effects_p (op1)
2006 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2007 : const0_rtx;
2008 /* x/1 is x. */
2009 if (trueop1 == const1_rtx)
2011 /* Handle narrowing DIV. */
2012 rtx x = gen_lowpart_common (mode, op0);
2013 if (x)
2014 return x;
2015 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2016 return gen_lowpart_SUBREG (mode, op0);
2017 return op0;
2019 /* x/-1 is -x. */
2020 if (trueop1 == constm1_rtx)
2022 rtx x = gen_lowpart_common (mode, op0);
2023 if (!x)
2024 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2025 ? gen_lowpart_SUBREG (mode, op0) : op0;
2026 return simplify_gen_unary (NEG, mode, x, mode);
2029 break;
2031 case UMOD:
2032 /* 0%x is 0 (or x&0 if x has side-effects). */
2033 if (trueop0 == const0_rtx)
2034 return side_effects_p (op1)
2035 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2036 : const0_rtx;
2037 /* x%1 is 0 (or x&0 if x has side-effects). */
2038 if (trueop1 == const1_rtx)
2039 return side_effects_p (op0)
2040 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2041 : const0_rtx;
2042 /* Implement modulus by power of two as AND. */
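/* For example, (umod X 16) becomes (and X 15).  */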
2043 if (GET_CODE (trueop1) == CONST_INT
2044 && exact_log2 (INTVAL (trueop1)) > 0)
2045 return simplify_gen_binary (AND, mode, op0,
2046 GEN_INT (INTVAL (op1) - 1));
2047 break;
2049 case MOD:
2050 /* 0%x is 0 (or x&0 if x has side-effects). */
2051 if (trueop0 == const0_rtx)
2052 return side_effects_p (op1)
2053 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2054 : const0_rtx;
2055 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2056 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2057 return side_effects_p (op0)
2058 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2059 : const0_rtx;
2060 break;
2062 case ROTATERT:
2063 case ROTATE:
2064 case ASHIFTRT:
2065 /* Rotating ~0 always results in ~0. */
2066 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2067 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2068 && ! side_effects_p (op1))
2069 return op0;
2071 /* Fall through.... */
2073 case ASHIFT:
2074 case LSHIFTRT:
2075 if (trueop1 == const0_rtx)
2076 return op0;
2077 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2078 return op0;
2079 break;
2081 case SMIN:
2082 if (width <= HOST_BITS_PER_WIDE_INT
2083 && GET_CODE (trueop1) == CONST_INT
2084 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2085 && ! side_effects_p (op0))
2086 return op1;
2087 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2088 return op0;
2089 tem = simplify_associative_operation (code, mode, op0, op1);
2090 if (tem)
2091 return tem;
2092 break;
2094 case SMAX:
2095 if (width <= HOST_BITS_PER_WIDE_INT
2096 && GET_CODE (trueop1) == CONST_INT
2097 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2098 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2099 && ! side_effects_p (op0))
2100 return op1;
2101 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2102 return op0;
2103 tem = simplify_associative_operation (code, mode, op0, op1);
2104 if (tem)
2105 return tem;
2106 break;
2108 case UMIN:
2109 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2110 return op1;
2111 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2112 return op0;
2113 tem = simplify_associative_operation (code, mode, op0, op1);
2114 if (tem)
2115 return tem;
2116 break;
2118 case UMAX:
2119 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2120 return op1;
2121 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2122 return op0;
2123 tem = simplify_associative_operation (code, mode, op0, op1);
2124 if (tem)
2125 return tem;
2126 break;
2128 case SS_PLUS:
2129 case US_PLUS:
2130 case SS_MINUS:
2131 case US_MINUS:
2132 /* ??? There are simplifications that can be done. */
2133 return 0;
2135 case VEC_SELECT:
2136 if (!VECTOR_MODE_P (mode))
2138 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2139 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2140 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2141 gcc_assert (XVECLEN (trueop1, 0) == 1);
2142 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2144 if (GET_CODE (trueop0) == CONST_VECTOR)
2145 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2146 (trueop1, 0, 0)));
2148 else
2150 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2151 gcc_assert (GET_MODE_INNER (mode)
2152 == GET_MODE_INNER (GET_MODE (trueop0)));
2153 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2155 if (GET_CODE (trueop0) == CONST_VECTOR)
2157 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2158 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2159 rtvec v = rtvec_alloc (n_elts);
2160 unsigned int i;
2162 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2163 for (i = 0; i < n_elts; i++)
2165 rtx x = XVECEXP (trueop1, 0, i);
2167 gcc_assert (GET_CODE (x) == CONST_INT);
2168 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2169 INTVAL (x));
2172 return gen_rtx_CONST_VECTOR (mode, v);
2175 return 0;
2176 case VEC_CONCAT:
2178 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2179 ? GET_MODE (trueop0)
2180 : GET_MODE_INNER (mode));
2181 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2182 ? GET_MODE (trueop1)
2183 : GET_MODE_INNER (mode));
2185 gcc_assert (VECTOR_MODE_P (mode));
2186 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2187 == GET_MODE_SIZE (mode));
2189 if (VECTOR_MODE_P (op0_mode))
2190 gcc_assert (GET_MODE_INNER (mode)
2191 == GET_MODE_INNER (op0_mode));
2192 else
2193 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2195 if (VECTOR_MODE_P (op1_mode))
2196 gcc_assert (GET_MODE_INNER (mode)
2197 == GET_MODE_INNER (op1_mode));
2198 else
2199 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2201 if ((GET_CODE (trueop0) == CONST_VECTOR
2202 || GET_CODE (trueop0) == CONST_INT
2203 || GET_CODE (trueop0) == CONST_DOUBLE)
2204 && (GET_CODE (trueop1) == CONST_VECTOR
2205 || GET_CODE (trueop1) == CONST_INT
2206 || GET_CODE (trueop1) == CONST_DOUBLE))
2208 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2209 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2210 rtvec v = rtvec_alloc (n_elts);
2211 unsigned int i;
2212 unsigned in_n_elts = 1;
2214 if (VECTOR_MODE_P (op0_mode))
2215 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2216 for (i = 0; i < n_elts; i++)
2218 if (i < in_n_elts)
2220 if (!VECTOR_MODE_P (op0_mode))
2221 RTVEC_ELT (v, i) = trueop0;
2222 else
2223 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2225 else
2227 if (!VECTOR_MODE_P (op1_mode))
2228 RTVEC_ELT (v, i) = trueop1;
2229 else
2230 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2231 i - in_n_elts);
2235 return gen_rtx_CONST_VECTOR (mode, v);
2238 return 0;
2240 default:
2241 gcc_unreachable ();
2244 return 0;
2247 /* Get the integer argument values in two forms:
2248 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2250 arg0 = INTVAL (trueop0);
2251 arg1 = INTVAL (trueop1);
2253 if (width < HOST_BITS_PER_WIDE_INT)
2255 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2256 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2258 arg0s = arg0;
2259 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2260 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2262 arg1s = arg1;
2263 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2264 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2266 else
2268 arg0s = arg0;
2269 arg1s = arg1;
2272 /* Compute the value of the arithmetic. */
2274 switch (code)
2276 case PLUS:
2277 val = arg0s + arg1s;
2278 break;
2280 case MINUS:
2281 val = arg0s - arg1s;
2282 break;
2284 case MULT:
2285 val = arg0s * arg1s;
2286 break;
2288 case DIV:
2289 if (arg1s == 0
2290 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2291 && arg1s == -1))
2292 return 0;
2293 val = arg0s / arg1s;
2294 break;
2296 case MOD:
2297 if (arg1s == 0
2298 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2299 && arg1s == -1))
2300 return 0;
2301 val = arg0s % arg1s;
2302 break;
2304 case UDIV:
2305 if (arg1 == 0
2306 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2307 && arg1s == -1))
2308 return 0;
2309 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2310 break;
2312 case UMOD:
2313 if (arg1 == 0
2314 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2315 && arg1s == -1))
2316 return 0;
2317 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2318 break;
2320 case AND:
2321 val = arg0 & arg1;
2322 break;
2324 case IOR:
2325 val = arg0 | arg1;
2326 break;
2328 case XOR:
2329 val = arg0 ^ arg1;
2330 break;
2332 case LSHIFTRT:
2333 case ASHIFT:
2334 case ASHIFTRT:
2335 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2336 value is in range. We can't return any old value for out-of-range
2337 arguments because either the middle-end (via shift_truncation_mask)
2338 or the back-end might be relying on target-specific knowledge.
2339 Nor can we rely on shift_truncation_mask, since the shift might
2340 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2341 if (SHIFT_COUNT_TRUNCATED)
2342 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2343 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2344 return 0;
2346 val = (code == ASHIFT
2347 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2348 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2350 /* Sign-extend the result for arithmetic right shifts. */
2351 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2352 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2353 break;
2355 case ROTATERT:
2356 if (arg1 < 0)
2357 return 0;
2359 arg1 %= width;
2360 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2361 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2362 break;
2364 case ROTATE:
2365 if (arg1 < 0)
2366 return 0;
2368 arg1 %= width;
2369 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2370 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2371 break;
2373 case COMPARE:
2374 /* Do nothing here. */
2375 return 0;
2377 case SMIN:
2378 val = arg0s <= arg1s ? arg0s : arg1s;
2379 break;
2381 case UMIN:
2382 val = ((unsigned HOST_WIDE_INT) arg0
2383 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2384 break;
2386 case SMAX:
2387 val = arg0s > arg1s ? arg0s : arg1s;
2388 break;
2390 case UMAX:
2391 val = ((unsigned HOST_WIDE_INT) arg0
2392 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2393 break;
2395 case SS_PLUS:
2396 case US_PLUS:
2397 case SS_MINUS:
2398 case US_MINUS:
2399 /* ??? There are simplifications that can be done. */
2400 return 0;
2402 default:
2403 gcc_unreachable ();
2406 val = trunc_int_for_mode (val, mode);
2408 return GEN_INT (val);
2411 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2412 PLUS or MINUS.
2414 Rather than testing for specific cases, we do this by a brute-force method
2415 and do all possible simplifications until no more changes occur. Then
2416 we rebuild the operation.
2418 If FORCE is true, then always generate the rtx. This is used to
2419 canonicalize stuff emitted from simplify_gen_binary. Note that this
2420 can still fail if the rtx is too complex. It won't fail just because
2421 the result is not 'simpler' than the input, however. */
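/* Worked example: simplifying (plus (minus A B) B) first flattens the
   expression into the operand list {A, -B, B}; the -B and B entries
   combine to zero, a later pairing pass folds A + 0 back to A, and the
   single remaining operand A is returned.  */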
2423 struct simplify_plus_minus_op_data
2425 rtx op;
2426 int neg;
2429 static int
2430 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2432 const struct simplify_plus_minus_op_data *d1 = p1;
2433 const struct simplify_plus_minus_op_data *d2 = p2;
2435 return (commutative_operand_precedence (d2->op)
2436 - commutative_operand_precedence (d1->op));
2439 static rtx
2440 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2441 rtx op1, int force)
2443 struct simplify_plus_minus_op_data ops[8];
2444 rtx result, tem;
2445 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2446 int first, changed;
2447 int i, j;
2449 memset (ops, 0, sizeof ops);
2451 /* Set up the two operands and then expand them until nothing has been
2452 changed. If we run out of room in our array, give up; this should
2453 almost never happen. */
2455 ops[0].op = op0;
2456 ops[0].neg = 0;
2457 ops[1].op = op1;
2458 ops[1].neg = (code == MINUS);
2462 changed = 0;
2464 for (i = 0; i < n_ops; i++)
2466 rtx this_op = ops[i].op;
2467 int this_neg = ops[i].neg;
2468 enum rtx_code this_code = GET_CODE (this_op);
2470 switch (this_code)
2472 case PLUS:
2473 case MINUS:
2474 if (n_ops == 7)
2475 return NULL_RTX;
2477 ops[n_ops].op = XEXP (this_op, 1);
2478 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2479 n_ops++;
2481 ops[i].op = XEXP (this_op, 0);
2482 input_ops++;
2483 changed = 1;
2484 break;
2486 case NEG:
2487 ops[i].op = XEXP (this_op, 0);
2488 ops[i].neg = ! this_neg;
2489 changed = 1;
2490 break;
2492 case CONST:
2493 if (n_ops < 7
2494 && GET_CODE (XEXP (this_op, 0)) == PLUS
2495 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2496 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2498 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2499 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2500 ops[n_ops].neg = this_neg;
2501 n_ops++;
2502 input_consts++;
2503 changed = 1;
2505 break;
2507 case NOT:
2508 /* ~a -> (-a - 1) */
2509 if (n_ops != 7)
2511 ops[n_ops].op = constm1_rtx;
2512 ops[n_ops++].neg = this_neg;
2513 ops[i].op = XEXP (this_op, 0);
2514 ops[i].neg = !this_neg;
2515 changed = 1;
2517 break;
2519 case CONST_INT:
2520 if (this_neg)
2522 ops[i].op = neg_const_int (mode, this_op);
2523 ops[i].neg = 0;
2524 changed = 1;
2526 break;
2528 default:
2529 break;
2533 while (changed);
2535 /* If we only have two operands, we can't do anything. */
2536 if (n_ops <= 2 && !force)
2537 return NULL_RTX;
2539 /* Count the number of CONSTs we didn't split above. */
2540 for (i = 0; i < n_ops; i++)
2541 if (GET_CODE (ops[i].op) == CONST)
2542 input_consts++;
2544 /* Now simplify each pair of operands until nothing changes. The first
2545 time through just simplify constants against each other. */
2547 first = 1;
2550 changed = first;
2552 for (i = 0; i < n_ops - 1; i++)
2553 for (j = i + 1; j < n_ops; j++)
2555 rtx lhs = ops[i].op, rhs = ops[j].op;
2556 int lneg = ops[i].neg, rneg = ops[j].neg;
2558 if (lhs != 0 && rhs != 0
2559 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2561 enum rtx_code ncode = PLUS;
2563 if (lneg != rneg)
2565 ncode = MINUS;
2566 if (lneg)
2567 tem = lhs, lhs = rhs, rhs = tem;
2569 else if (swap_commutative_operands_p (lhs, rhs))
2570 tem = lhs, lhs = rhs, rhs = tem;
2572 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2574 /* Reject "simplifications" that just wrap the two
2575 arguments in a CONST. Failure to do so can result
2576 in infinite recursion with simplify_binary_operation
2577 when it calls us to simplify CONST operations. */
2578 if (tem
2579 && ! (GET_CODE (tem) == CONST
2580 && GET_CODE (XEXP (tem, 0)) == ncode
2581 && XEXP (XEXP (tem, 0), 0) == lhs
2582 && XEXP (XEXP (tem, 0), 1) == rhs)
2583 /* Don't allow -x + -1 -> ~x simplifications in the
2584 first pass. This allows us the chance to combine
2585 the -1 with other constants. */
2586 && ! (first
2587 && GET_CODE (tem) == NOT
2588 && XEXP (tem, 0) == rhs))
2590 lneg &= rneg;
2591 if (GET_CODE (tem) == NEG)
2592 tem = XEXP (tem, 0), lneg = !lneg;
2593 if (GET_CODE (tem) == CONST_INT && lneg)
2594 tem = neg_const_int (mode, tem), lneg = 0;
2596 ops[i].op = tem;
2597 ops[i].neg = lneg;
2598 ops[j].op = NULL_RTX;
2599 changed = 1;
2604 first = 0;
2606 while (changed);
2608 /* Pack all the operands to the lower-numbered entries. */
2609 for (i = 0, j = 0; j < n_ops; j++)
2610 if (ops[j].op)
2611 ops[i++] = ops[j];
2612 n_ops = i;
2614 /* Sort the operations based on swap_commutative_operands_p. */
2615 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2617 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2618 if (n_ops == 2
2619 && GET_CODE (ops[1].op) == CONST_INT
2620 && CONSTANT_P (ops[0].op)
2621 && ops[0].neg)
2622 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2624 /* We suppressed creation of trivial CONST expressions in the
2625 combination loop to avoid recursion. Create one manually now.
2626 The combination loop should have ensured that there is exactly
2627 one CONST_INT, and the sort will have ensured that it is last
2628 in the array and that any other constant will be next-to-last. */
2630 if (n_ops > 1
2631 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2632 && CONSTANT_P (ops[n_ops - 2].op))
2634 rtx value = ops[n_ops - 1].op;
2635 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2636 value = neg_const_int (mode, value);
2637 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2638 n_ops--;
2641 /* Count the number of CONSTs that we generated. */
2642 n_consts = 0;
2643 for (i = 0; i < n_ops; i++)
2644 if (GET_CODE (ops[i].op) == CONST)
2645 n_consts++;
2647 /* Give up if we didn't reduce the number of operands we had. Make
2648 sure we count a CONST as two operands. If we have the same
2649 number of operands, but have made more CONSTs than before, this
2650 is also an improvement, so accept it. */
2651 if (!force
2652 && (n_ops + n_consts > input_ops
2653 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2654 return NULL_RTX;
2656 /* Put a non-negated operand first, if possible. */
2658 for (i = 0; i < n_ops && ops[i].neg; i++)
2659 continue;
2660 if (i == n_ops)
2661 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2662 else if (i != 0)
2664 tem = ops[0].op;
2665 ops[0] = ops[i];
2666 ops[i].op = tem;
2667 ops[i].neg = 1;
2670 /* Now make the result by performing the requested operations. */
2671 result = ops[0].op;
2672 for (i = 1; i < n_ops; i++)
2673 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2674 mode, result, ops[i].op);
2676 return result;
2679 /* Like simplify_binary_operation except used for relational operators.
2680 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2681 not both be VOIDmode as well.
2683 CMP_MODE specifies the mode in which the comparison is done, so it is
2684 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2685 the operands or, if both are VOIDmode, the operands are compared in
2686 "infinite precision". */
2688 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2689 enum machine_mode cmp_mode, rtx op0, rtx op1)
2691 rtx tem, trueop0, trueop1;
2693 if (cmp_mode == VOIDmode)
2694 cmp_mode = GET_MODE (op0);
2695 if (cmp_mode == VOIDmode)
2696 cmp_mode = GET_MODE (op1);
2698 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2699 if (tem)
2701 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2703 if (tem == const0_rtx)
2704 return CONST0_RTX (mode);
2705 #ifdef FLOAT_STORE_FLAG_VALUE
2707 REAL_VALUE_TYPE val;
2708 val = FLOAT_STORE_FLAG_VALUE (mode);
2709 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2711 #else
2712 return NULL_RTX;
2713 #endif
2715 if (VECTOR_MODE_P (mode))
2717 if (tem == const0_rtx)
2718 return CONST0_RTX (mode);
2719 #ifdef VECTOR_STORE_FLAG_VALUE
2721 int i, units;
2722 rtvec v;
2724 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2725 if (val == NULL_RTX)
2726 return NULL_RTX;
2727 if (val == const1_rtx)
2728 return CONST1_RTX (mode);
2730 units = GET_MODE_NUNITS (mode);
2731 v = rtvec_alloc (units);
2732 for (i = 0; i < units; i++)
2733 RTVEC_ELT (v, i) = val;
2734 return gen_rtx_raw_CONST_VECTOR (mode, v);
2736 #else
2737 return NULL_RTX;
2738 #endif
2741 return tem;
2744 /* For the following tests, ensure const0_rtx is op1. */
2745 if (swap_commutative_operands_p (op0, op1)
2746 || (op0 == const0_rtx && op1 != const0_rtx))
2747 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2749 /* If op0 is a compare, extract the comparison arguments from it. */
2750 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2751 return simplify_relational_operation (code, mode, VOIDmode,
2752 XEXP (op0, 0), XEXP (op0, 1));
2754 if (mode == VOIDmode
2755 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2756 || CC0_P (op0))
2757 return NULL_RTX;
2759 trueop0 = avoid_constant_pool_reference (op0);
2760 trueop1 = avoid_constant_pool_reference (op1);
2761 return simplify_relational_operation_1 (code, mode, cmp_mode,
2762 trueop0, trueop1);
2765 /* This part of simplify_relational_operation is only used when CMP_MODE
2766 is not in class MODE_CC (i.e. it is a real comparison).
2768 MODE is the mode of the result, while CMP_MODE specifies the mode in
2769 which the comparison is done, so it is the mode of the operands. */
2771 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2772 enum machine_mode cmp_mode, rtx op0, rtx op1)
2774 if (GET_CODE (op1) == CONST_INT)
2776 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2778 /* If op0 is a comparison, extract the comparison arguments from it. */
2779 if (code == NE)
2781 if (GET_MODE (op0) == cmp_mode)
2782 return simplify_rtx (op0);
2783 else
2784 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2785 XEXP (op0, 0), XEXP (op0, 1));
2787 else if (code == EQ)
2789 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2790 if (new_code != UNKNOWN)
2791 return simplify_gen_relational (new_code, mode, VOIDmode,
2792 XEXP (op0, 0), XEXP (op0, 1));
2797 return NULL_RTX;
2800 /* Check if the given comparison (done in the given MODE) is actually a
2801 tautology or a contradiction.
2802 If no simplification is possible, this function returns zero.
2803 Otherwise, it returns either const_true_rtx or const0_rtx. */
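/* For example, comparisons against the mode's bounds fold without
   knowing the other operand: (ltu X 0) is always false and (geu X 0)
   is always true, so they reduce to const0_rtx and const_true_rtx
   respectively.  */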
2806 simplify_const_relational_operation (enum rtx_code code,
2807 enum machine_mode mode,
2808 rtx op0, rtx op1)
2810 int equal, op0lt, op0ltu, op1lt, op1ltu;
2811 rtx tem;
2812 rtx trueop0;
2813 rtx trueop1;
2815 gcc_assert (mode != VOIDmode
2816 || (GET_MODE (op0) == VOIDmode
2817 && GET_MODE (op1) == VOIDmode));
2819 /* If op0 is a compare, extract the comparison arguments from it. */
2820 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2821 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2823 /* We can't simplify MODE_CC values since we don't know what the
2824 actual comparison is. */
2825 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2826 return 0;
2828 /* Make sure the constant is second. */
2829 if (swap_commutative_operands_p (op0, op1))
2831 tem = op0, op0 = op1, op1 = tem;
2832 code = swap_condition (code);
2835 trueop0 = avoid_constant_pool_reference (op0);
2836 trueop1 = avoid_constant_pool_reference (op1);
2838 /* For integer comparisons of A and B maybe we can simplify A - B and can
2839 then simplify a comparison of that with zero. If A and B are both either
2840 a register or a CONST_INT, this can't help; testing for these cases will
2841 prevent infinite recursion here and speed things up.
2843 If CODE is an unsigned comparison, then we can never do this optimization,
2844 because it gives an incorrect result if the subtraction wraps around zero.
2845 ANSI C defines unsigned operations such that they never overflow, and
2846 thus such cases can not be ignored; but we cannot do it even for
2847 signed comparisons for languages such as Java, so test flag_wrapv. */
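/* For example, (gt (plus X 4) (plus X 9)) is handled by simplifying the
   difference: (minus (plus X 4) (plus X 9)) folds to -5, and -5 > 0 is
   false.  This is only valid when signed overflow is undefined, hence
   the flag_wrapv guard below.  */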
2849 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2850 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2851 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2852 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2853 /* We cannot do this for == or != if tem is a nonzero address. */
2854 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2855 && code != GTU && code != GEU && code != LTU && code != LEU)
2856 return simplify_const_relational_operation (signed_condition (code),
2857 mode, tem, const0_rtx);
2859 if (flag_unsafe_math_optimizations && code == ORDERED)
2860 return const_true_rtx;
2862 if (flag_unsafe_math_optimizations && code == UNORDERED)
2863 return const0_rtx;
2865 /* For modes without NaNs, if the two operands are equal, we know the
2866 result except if they have side-effects. */
2867 if (! HONOR_NANS (GET_MODE (trueop0))
2868 && rtx_equal_p (trueop0, trueop1)
2869 && ! side_effects_p (trueop0))
2870 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2872 /* If the operands are floating-point constants, see if we can fold
2873 the result. */
2874 else if (GET_CODE (trueop0) == CONST_DOUBLE
2875 && GET_CODE (trueop1) == CONST_DOUBLE
2876 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2878 REAL_VALUE_TYPE d0, d1;
2880 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2881 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2883 /* Comparisons are unordered iff at least one of the values is NaN. */
2884 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2885 switch (code)
2887 case UNEQ:
2888 case UNLT:
2889 case UNGT:
2890 case UNLE:
2891 case UNGE:
2892 case NE:
2893 case UNORDERED:
2894 return const_true_rtx;
2895 case EQ:
2896 case LT:
2897 case GT:
2898 case LE:
2899 case GE:
2900 case LTGT:
2901 case ORDERED:
2902 return const0_rtx;
2903 default:
2904 return 0;
2907 equal = REAL_VALUES_EQUAL (d0, d1);
2908 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2909 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2912 /* Otherwise, see if the operands are both integers. */
2913 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2914 && (GET_CODE (trueop0) == CONST_DOUBLE
2915 || GET_CODE (trueop0) == CONST_INT)
2916 && (GET_CODE (trueop1) == CONST_DOUBLE
2917 || GET_CODE (trueop1) == CONST_INT))
2919 int width = GET_MODE_BITSIZE (mode);
2920 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2921 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2923 /* Get the two words comprising each integer constant. */
2924 if (GET_CODE (trueop0) == CONST_DOUBLE)
2926 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2927 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2929 else
2931 l0u = l0s = INTVAL (trueop0);
2932 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2935 if (GET_CODE (trueop1) == CONST_DOUBLE)
2937 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2938 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2940 else
2942 l1u = l1s = INTVAL (trueop1);
2943 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2946 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2947 we have to sign or zero-extend the values. */
2948 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2950 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2951 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2953 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2954 l0s |= ((HOST_WIDE_INT) (-1) << width);
2956 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2957 l1s |= ((HOST_WIDE_INT) (-1) << width);
2959 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2960 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2962 equal = (h0u == h1u && l0u == l1u);
2963 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2964 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2965 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2966 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2969 /* Otherwise, there are some code-specific tests we can make. */
2970 else
2972 /* Optimize comparisons with upper and lower bounds. */
2973 if (SCALAR_INT_MODE_P (mode)
2974 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2976 rtx mmin, mmax;
2977 int sign;
2979 if (code == GEU
2980 || code == LEU
2981 || code == GTU
2982 || code == LTU)
2983 sign = 0;
2984 else
2985 sign = 1;
2987 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
2989 tem = NULL_RTX;
2990 switch (code)
2992 case GEU:
2993 case GE:
2994 /* x >= min is always true. */
2995 if (rtx_equal_p (trueop1, mmin))
2996 tem = const_true_rtx;
2997 else
2998 break;
3000 case LEU:
3001 case LE:
3002 /* x <= max is always true. */
3003 if (rtx_equal_p (trueop1, mmax))
3004 tem = const_true_rtx;
3005 break;
3007 case GTU:
3008 case GT:
3009 /* x > max is always false. */
3010 if (rtx_equal_p (trueop1, mmax))
3011 tem = const0_rtx;
3012 break;
3014 case LTU:
3015 case LT:
3016 /* x < min is always false. */
3017 if (rtx_equal_p (trueop1, mmin))
3018 tem = const0_rtx;
3019 break;
3021 default:
3022 break;
3024 if (tem == const0_rtx
3025 || tem == const_true_rtx)
3026 return tem;
3029 switch (code)
3031 case EQ:
3032 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3033 return const0_rtx;
3034 break;
3036 case NE:
3037 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3038 return const_true_rtx;
3039 break;
3041 case LT:
3042 /* Optimize abs(x) < 0.0. */
3043 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3045 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3046 : trueop0;
3047 if (GET_CODE (tem) == ABS)
3048 return const0_rtx;
3050 break;
3052 case GE:
3053 /* Optimize abs(x) >= 0.0. */
3054 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3056 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3057 : trueop0;
3058 if (GET_CODE (tem) == ABS)
3059 return const_true_rtx;
3061 break;
3063 case UNGE:
3064 /* Optimize ! (abs(x) < 0.0). */
3065 if (trueop1 == CONST0_RTX (mode))
3067 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3068 : trueop0;
3069 if (GET_CODE (tem) == ABS)
3070 return const_true_rtx;
3072 break;
3074 default:
3075 break;
3078 return 0;
3081 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3082 as appropriate. */
3083 switch (code)
3085 case EQ:
3086 case UNEQ:
3087 return equal ? const_true_rtx : const0_rtx;
3088 case NE:
3089 case LTGT:
3090 return ! equal ? const_true_rtx : const0_rtx;
3091 case LT:
3092 case UNLT:
3093 return op0lt ? const_true_rtx : const0_rtx;
3094 case GT:
3095 case UNGT:
3096 return op1lt ? const_true_rtx : const0_rtx;
3097 case LTU:
3098 return op0ltu ? const_true_rtx : const0_rtx;
3099 case GTU:
3100 return op1ltu ? const_true_rtx : const0_rtx;
3101 case LE:
3102 case UNLE:
3103 return equal || op0lt ? const_true_rtx : const0_rtx;
3104 case GE:
3105 case UNGE:
3106 return equal || op1lt ? const_true_rtx : const0_rtx;
3107 case LEU:
3108 return equal || op0ltu ? const_true_rtx : const0_rtx;
3109 case GEU:
3110 return equal || op1ltu ? const_true_rtx : const0_rtx;
3111 case ORDERED:
3112 return const_true_rtx;
3113 case UNORDERED:
3114 return const0_rtx;
3115 default:
3116 gcc_unreachable ();
3120 /* Simplify CODE, an operation with result mode MODE and three operands,
3121 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3122 a constant. Return 0 if no simplification is possible. */
3125 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3126 enum machine_mode op0_mode, rtx op0, rtx op1,
3127 rtx op2)
3129 unsigned int width = GET_MODE_BITSIZE (mode);
3131 /* VOIDmode means "infinite" precision. */
3132 if (width == 0)
3133 width = HOST_BITS_PER_WIDE_INT;
3135 switch (code)
3137 case SIGN_EXTRACT:
3138 case ZERO_EXTRACT:
3139 if (GET_CODE (op0) == CONST_INT
3140 && GET_CODE (op1) == CONST_INT
3141 && GET_CODE (op2) == CONST_INT
3142 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3143 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3145 /* Extracting a bit-field from a constant */
3146 HOST_WIDE_INT val = INTVAL (op0);
3148 if (BITS_BIG_ENDIAN)
3149 val >>= (GET_MODE_BITSIZE (op0_mode)
3150 - INTVAL (op2) - INTVAL (op1));
3151 else
3152 val >>= INTVAL (op2);
3154 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3156 /* First zero-extend. */
3157 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3158 /* If desired, propagate sign bit. */
3159 if (code == SIGN_EXTRACT
3160 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3161 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3164 /* Clear the bits that don't belong in our mode,
3165 unless they and our sign bit are all one.
3166 So we get either a reasonable negative value or a reasonable
3167 unsigned value for this mode. */
3168 if (width < HOST_BITS_PER_WIDE_INT
3169 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3170 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3171 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3173 return gen_int_mode (val, mode);
3175 break;
3177 case IF_THEN_ELSE:
3178 if (GET_CODE (op0) == CONST_INT)
3179 return op0 != const0_rtx ? op1 : op2;
3181 /* Convert c ? a : a into "a". */
3182 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3183 return op1;
3185 /* Convert a != b ? a : b into "a". */
3186 if (GET_CODE (op0) == NE
3187 && ! side_effects_p (op0)
3188 && ! HONOR_NANS (mode)
3189 && ! HONOR_SIGNED_ZEROS (mode)
3190 && ((rtx_equal_p (XEXP (op0, 0), op1)
3191 && rtx_equal_p (XEXP (op0, 1), op2))
3192 || (rtx_equal_p (XEXP (op0, 0), op2)
3193 && rtx_equal_p (XEXP (op0, 1), op1))))
3194 return op1;
3196 /* Convert a == b ? a : b into "b". */
3197 if (GET_CODE (op0) == EQ
3198 && ! side_effects_p (op0)
3199 && ! HONOR_NANS (mode)
3200 && ! HONOR_SIGNED_ZEROS (mode)
3201 && ((rtx_equal_p (XEXP (op0, 0), op1)
3202 && rtx_equal_p (XEXP (op0, 1), op2))
3203 || (rtx_equal_p (XEXP (op0, 0), op2)
3204 && rtx_equal_p (XEXP (op0, 1), op1))))
3205 return op2;
3207 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3209 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3210 ? GET_MODE (XEXP (op0, 1))
3211 : GET_MODE (XEXP (op0, 0)));
3212 rtx temp;
3214 /* Look for happy constants in op1 and op2. */
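/* For example, on a target where STORE_FLAG_VALUE is 1,
   (if_then_else (lt A B) (const_int 1) (const_int 0)) reduces to the
   comparison (lt A B) itself; with the arms swapped it reduces to the
   reversed comparison, (ge A B) for integer operands.  */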
3215 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3217 HOST_WIDE_INT t = INTVAL (op1);
3218 HOST_WIDE_INT f = INTVAL (op2);
3220 if (t == STORE_FLAG_VALUE && f == 0)
3221 code = GET_CODE (op0);
3222 else if (t == 0 && f == STORE_FLAG_VALUE)
3224 enum rtx_code tmp;
3225 tmp = reversed_comparison_code (op0, NULL_RTX);
3226 if (tmp == UNKNOWN)
3227 break;
3228 code = tmp;
3230 else
3231 break;
3233 return simplify_gen_relational (code, mode, cmp_mode,
3234 XEXP (op0, 0), XEXP (op0, 1));
3237 if (cmp_mode == VOIDmode)
3238 cmp_mode = op0_mode;
3239 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3240 cmp_mode, XEXP (op0, 0),
3241 XEXP (op0, 1));
3243 /* See if any simplifications were possible. */
3244 if (temp)
3246 if (GET_CODE (temp) == CONST_INT)
3247 return temp == const0_rtx ? op2 : op1;
3248 else if (temp)
3249 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3252 break;
3254 case VEC_MERGE:
3255 gcc_assert (GET_MODE (op0) == mode);
3256 gcc_assert (GET_MODE (op1) == mode);
3257 gcc_assert (VECTOR_MODE_P (mode));
3258 op2 = avoid_constant_pool_reference (op2);
3259 if (GET_CODE (op2) == CONST_INT)
3261 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3262 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3263 int mask = (1 << n_elts) - 1;
3265 if (!(INTVAL (op2) & mask))
3266 return op1;
3267 if ((INTVAL (op2) & mask) == mask)
3268 return op0;
3270 op0 = avoid_constant_pool_reference (op0);
3271 op1 = avoid_constant_pool_reference (op1);
3272 if (GET_CODE (op0) == CONST_VECTOR
3273 && GET_CODE (op1) == CONST_VECTOR)
3275 rtvec v = rtvec_alloc (n_elts);
3276 unsigned int i;
3278 for (i = 0; i < n_elts; i++)
3279 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3280 ? CONST_VECTOR_ELT (op0, i)
3281 : CONST_VECTOR_ELT (op1, i));
3282 return gen_rtx_CONST_VECTOR (mode, v);
3285 break;
3287 default:
3288 gcc_unreachable ();
3291 return 0;
3294 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3295 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3297 Works by unpacking OP into a collection of 8-bit values
3298 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3299 and then repacking them again for OUTERMODE. */
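/* For example, on a little-endian target (subreg:HI (const_int 0x12345678) 0)
   with SImode as the inner mode unpacks the value into the byte array
   {0x78, 0x56, 0x34, 0x12}, selects the two bytes at offset 0, and repacks
   them as (const_int 0x5678).  */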
3301 static rtx
3302 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3303 enum machine_mode innermode, unsigned int byte)
3305 /* We support up to 512-bit values (for V8DFmode). */
3306 enum {
3307 max_bitsize = 512,
3308 value_bit = 8,
3309 value_mask = (1 << value_bit) - 1
3311 unsigned char value[max_bitsize / value_bit];
3312 int value_start;
3313 int i;
3314 int elem;
3316 int num_elem;
3317 rtx * elems;
3318 int elem_bitsize;
3319 rtx result_s;
3320 rtvec result_v = NULL;
3321 enum mode_class outer_class;
3322 enum machine_mode outer_submode;
3324 /* Some ports misuse CCmode. */
3325 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3326 return op;
3328 /* Unpack the value. */
3330 if (GET_CODE (op) == CONST_VECTOR)
3332 num_elem = CONST_VECTOR_NUNITS (op);
3333 elems = &CONST_VECTOR_ELT (op, 0);
3334 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3336 else
3338 num_elem = 1;
3339 elems = &op;
3340 elem_bitsize = max_bitsize;
3342 /* If this asserts, it is too complicated; reducing value_bit may help. */
3343 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3344 /* I don't know how to handle endianness of sub-units. */
3345 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3347 for (elem = 0; elem < num_elem; elem++)
3349 unsigned char * vp;
3350 rtx el = elems[elem];
3352 /* Vectors are kept in target memory order. (This is probably
3353 a mistake.) */
3355 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3356 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3357 / BITS_PER_UNIT);
3358 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3359 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3360 unsigned bytele = (subword_byte % UNITS_PER_WORD
3361 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3362 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3365 switch (GET_CODE (el))
3367 case CONST_INT:
3368 for (i = 0;
3369 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3370 i += value_bit)
3371 *vp++ = INTVAL (el) >> i;
3372 /* CONST_INTs are always logically sign-extended. */
3373 for (; i < elem_bitsize; i += value_bit)
3374 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3375 break;
3377 case CONST_DOUBLE:
3378 if (GET_MODE (el) == VOIDmode)
3380 /* If this triggers, someone should have generated a
3381 CONST_INT instead. */
3382 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3384 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3385 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3386 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3388 *vp++
3389 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3390 i += value_bit;
3392 /* It shouldn't matter what's done here, so fill it with
3393 zero. */
3394 for (; i < max_bitsize; i += value_bit)
3395 *vp++ = 0;
3397 else
3399 long tmp[max_bitsize / 32];
3400 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3402 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3403 gcc_assert (bitsize <= elem_bitsize);
3404 gcc_assert (bitsize % value_bit == 0);
3406 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3407 GET_MODE (el));
3409 /* real_to_target produces its result in words affected by
3410 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3411 and use WORDS_BIG_ENDIAN instead; see the documentation
3412 of SUBREG in rtl.texi. */
3413 for (i = 0; i < bitsize; i += value_bit)
3415 int ibase;
3416 if (WORDS_BIG_ENDIAN)
3417 ibase = bitsize - 1 - i;
3418 else
3419 ibase = i;
3420 *vp++ = tmp[ibase / 32] >> i % 32;
3423 /* It shouldn't matter what's done here, so fill it with
3424 zero. */
3425 for (; i < elem_bitsize; i += value_bit)
3426 *vp++ = 0;
3428 break;
3430 default:
3431 gcc_unreachable ();
3435 /* Now, pick the right byte to start with. */
3436 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3437 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3438 will already have offset 0. */
3439 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3441 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3442 - byte);
3443 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3444 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3445 byte = (subword_byte % UNITS_PER_WORD
3446 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3449 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3450 so if it's become negative it will instead be very large.) */
3451 gcc_assert (byte < GET_MODE_SIZE (innermode));
3453 /* Convert from bytes to chunks of size value_bit. */
3454 value_start = byte * (BITS_PER_UNIT / value_bit);
3456 /* Re-pack the value. */
3458 if (VECTOR_MODE_P (outermode))
3460 num_elem = GET_MODE_NUNITS (outermode);
3461 result_v = rtvec_alloc (num_elem);
3462 elems = &RTVEC_ELT (result_v, 0);
3463 outer_submode = GET_MODE_INNER (outermode);
3465 else
3467 num_elem = 1;
3468 elems = &result_s;
3469 outer_submode = outermode;
3472 outer_class = GET_MODE_CLASS (outer_submode);
3473 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3475 gcc_assert (elem_bitsize % value_bit == 0);
3476 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3478 for (elem = 0; elem < num_elem; elem++)
3480 unsigned char *vp;
3482 /* Vectors are stored in target memory order. (This is probably
3483 a mistake.) */
3485 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3486 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3487 / BITS_PER_UNIT);
3488 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3489 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3490 unsigned bytele = (subword_byte % UNITS_PER_WORD
3491 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3492 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3495 switch (outer_class)
3497 case MODE_INT:
3498 case MODE_PARTIAL_INT:
3500 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3502 for (i = 0;
3503 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3504 i += value_bit)
3505 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3506 for (; i < elem_bitsize; i += value_bit)
3507 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3508 << (i - HOST_BITS_PER_WIDE_INT));
3510 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3511 know why. */
3512 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3513 elems[elem] = gen_int_mode (lo, outer_submode);
3514 else
3515 elems[elem] = immed_double_const (lo, hi, outer_submode);
3517 break;
3519 case MODE_FLOAT:
3521 REAL_VALUE_TYPE r;
3522 long tmp[max_bitsize / 32];
3524 /* real_from_target wants its input in words affected by
3525 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3526 and use WORDS_BIG_ENDIAN instead; see the documentation
3527 of SUBREG in rtl.texi. */
3528 for (i = 0; i < max_bitsize / 32; i++)
3529 tmp[i] = 0;
3530 for (i = 0; i < elem_bitsize; i += value_bit)
3532 int ibase;
3533 if (WORDS_BIG_ENDIAN)
3534 ibase = elem_bitsize - 1 - i;
3535 else
3536 ibase = i;
3537 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3540 real_from_target (&r, tmp, outer_submode);
3541 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3543 break;
3545 default:
3546 gcc_unreachable ();
3549 if (VECTOR_MODE_P (outermode))
3550 return gen_rtx_CONST_VECTOR (outermode, result_v);
3551 else
3552 return result_s;
3555 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3556 Return 0 if no simplifications are possible. */
3558 simplify_subreg (enum machine_mode outermode, rtx op,
3559 enum machine_mode innermode, unsigned int byte)
3561 /* Little bit of sanity checking. */
3562 gcc_assert (innermode != VOIDmode);
3563 gcc_assert (outermode != VOIDmode);
3564 gcc_assert (innermode != BLKmode);
3565 gcc_assert (outermode != BLKmode);
3567 gcc_assert (GET_MODE (op) == innermode
3568 || GET_MODE (op) == VOIDmode);
3570 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3571 gcc_assert (byte < GET_MODE_SIZE (innermode));
3573 if (outermode == innermode && !byte)
3574 return op;
3576 if (GET_CODE (op) == CONST_INT
3577 || GET_CODE (op) == CONST_DOUBLE
3578 || GET_CODE (op) == CONST_VECTOR)
3579 return simplify_immed_subreg (outermode, op, innermode, byte);
3581 /* Changing mode twice with SUBREG => just change it once,
3582 or not at all if changing back to op's starting mode. */
3583 if (GET_CODE (op) == SUBREG)
3585 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3586 int final_offset = byte + SUBREG_BYTE (op);
3587 rtx newx;
3589 if (outermode == innermostmode
3590 && byte == 0 && SUBREG_BYTE (op) == 0)
3591 return SUBREG_REG (op);
3593 /* The SUBREG_BYTE represents offset, as if the value were stored
3594 in memory. Irritating exception is paradoxical subreg, where
3595 we define SUBREG_BYTE to be 0. On big endian machines, this
3596 value should be negative. For a moment, undo this exception. */
3597 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3599 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3600 if (WORDS_BIG_ENDIAN)
3601 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3602 if (BYTES_BIG_ENDIAN)
3603 final_offset += difference % UNITS_PER_WORD;
3605 if (SUBREG_BYTE (op) == 0
3606 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3608 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3609 if (WORDS_BIG_ENDIAN)
3610 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3611 if (BYTES_BIG_ENDIAN)
3612 final_offset += difference % UNITS_PER_WORD;
3615 /* See whether resulting subreg will be paradoxical. */
3616 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3618 /* In nonparadoxical subregs we can't handle negative offsets. */
3619 if (final_offset < 0)
3620 return NULL_RTX;
3621 /* Bail out in case resulting subreg would be incorrect. */
3622 if (final_offset % GET_MODE_SIZE (outermode)
3623 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3624 return NULL_RTX;
3626 else
3628 int offset = 0;
3629 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3631 /* In a paradoxical subreg, see if we are still looking at the lower part.
3632 If so, our SUBREG_BYTE will be 0. */
3633 if (WORDS_BIG_ENDIAN)
3634 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3635 if (BYTES_BIG_ENDIAN)
3636 offset += difference % UNITS_PER_WORD;
3637 if (offset == final_offset)
3638 final_offset = 0;
3639 else
3640 return NULL_RTX;
3643 /* Recurse for further possible simplifications. */
3644 newx = simplify_subreg (outermode, SUBREG_REG (op),
3645 GET_MODE (SUBREG_REG (op)),
3646 final_offset);
3647 if (newx)
3648 return newx;
3649 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3652 /* SUBREG of a hard register => just change the register number
3653 and/or mode. If the hard register is not valid in that mode,
3654 suppress this simplification. If the hard register is the stack,
3655 frame, or argument pointer, leave this as a SUBREG. */
3657 if (REG_P (op)
3658 && REGNO (op) < FIRST_PSEUDO_REGISTER
3659 #ifdef CANNOT_CHANGE_MODE_CLASS
3660 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3661 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3662 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3663 #endif
3664 && ((reload_completed && !frame_pointer_needed)
3665 || (REGNO (op) != FRAME_POINTER_REGNUM
3666 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3667 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3668 #endif
3670 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3671 && REGNO (op) != ARG_POINTER_REGNUM
3672 #endif
3673 && REGNO (op) != STACK_POINTER_REGNUM
3674 && subreg_offset_representable_p (REGNO (op), innermode,
3675 byte, outermode))
3677 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3678 int final_regno = subreg_hard_regno (tem, 0);
3680 /* ??? We do allow it if the current REG is not valid for
3681 its mode. This is a kludge to work around how float/complex
3682 arguments are passed on 32-bit SPARC and should be fixed. */
3683 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3684 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3686 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3688 /* Propagate original regno. We don't have any way to specify
3689 the offset inside original regno, so do so only for lowpart.
3690 The information is used only by alias analysis that cannot
3691 grok partial registers anyway. */
3693 if (subreg_lowpart_offset (outermode, innermode) == byte)
3694 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3695 return x;
3699 /* If we have a SUBREG of a register that we are replacing and we are
3700 replacing it with a MEM, make a new MEM and try replacing the
3701 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3702 or if we would be widening it. */
3704 if (MEM_P (op)
3705 && ! mode_dependent_address_p (XEXP (op, 0))
3706 /* Allow splitting of volatile memory references in case we don't
3707 have instruction to move the whole thing. */
3708 && (! MEM_VOLATILE_P (op)
3709 || ! have_insn_for (SET, innermode))
3710 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3711 return adjust_address_nv (op, outermode, byte);
3713 /* Handle complex values represented as CONCAT
3714 of real and imaginary part. */
3715 if (GET_CODE (op) == CONCAT)
3717 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3718 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3719 unsigned int final_offset;
3720 rtx res;
3722 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3723 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3724 if (res)
3725 return res;
3726 /* We can at least simplify it by referring directly to the
3727 relevant part. */
3728 return gen_rtx_SUBREG (outermode, part, final_offset);
3731 /* Optimize SUBREG truncations of zero and sign extended values. */
3732 if ((GET_CODE (op) == ZERO_EXTEND
3733 || GET_CODE (op) == SIGN_EXTEND)
3734 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3736 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3738 /* If we're requesting the lowpart of a zero or sign extension,
3739 there are three possibilities. If the outermode is the same
3740 as the origmode, we can omit both the extension and the subreg.
3741 If the outermode is not larger than the origmode, we can apply
3742 the truncation without the extension. Finally, if the outermode
3743 is larger than the origmode, but both are integer modes, we
3744 can just extend to the appropriate mode. */
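/* For example, on a little-endian target, where byte 0 is the lowpart,
   (subreg:QI (zero_extend:SI (reg:QI X)) 0) reduces to (reg:QI X),
   while (subreg:HI (zero_extend:SI (reg:QI X)) 0) becomes
   (zero_extend:HI (reg:QI X)).  */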
3745 if (bitpos == 0)
3747 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3748 if (outermode == origmode)
3749 return XEXP (op, 0);
3750 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3751 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3752 subreg_lowpart_offset (outermode,
3753 origmode));
3754 if (SCALAR_INT_MODE_P (outermode))
3755 return simplify_gen_unary (GET_CODE (op), outermode,
3756 XEXP (op, 0), origmode);
3759 /* A SUBREG resulting from a zero extension may fold to zero if
3760 it extracts higher bits than the ZERO_EXTEND's source bits. */
3761 if (GET_CODE (op) == ZERO_EXTEND
3762 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3763 return CONST0_RTX (outermode);
3766 return NULL_RTX;
3769 /* Make a SUBREG operation or equivalent if it folds. */
3772 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3773 enum machine_mode innermode, unsigned int byte)
3775 rtx newx;
3776 /* Little bit of sanity checking. */
3777 gcc_assert (innermode != VOIDmode);
3778 gcc_assert (outermode != VOIDmode);
3779 gcc_assert (innermode != BLKmode);
3780 gcc_assert (outermode != BLKmode);
3782 gcc_assert (GET_MODE (op) == innermode
3783 || GET_MODE (op) == VOIDmode);
3785 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3786 gcc_assert (byte < GET_MODE_SIZE (innermode));
3788 newx = simplify_subreg (outermode, op, innermode, byte);
3789 if (newx)
3790 return newx;
3792 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode
3793 || (REG_P (op) && REGNO (op) < FIRST_PSEUDO_REGISTER))
3794 return NULL_RTX;
3796 return gen_rtx_SUBREG (outermode, op, byte);
3798 /* Simplify X, an rtx expression.
3800 Return the simplified expression or NULL if no simplifications
3801 were possible.
3803 This is the preferred entry point into the simplification routines;
3804 however, we still allow passes to call the more specific routines.
3806 Right now GCC has three (yes, three) major bodies of RTL simplification
3807 code that need to be unified.
3809 1. fold_rtx in cse.c. This code uses various CSE specific
3810 information to aid in RTL simplification.
3812 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3813 it uses combine specific information to aid in RTL
3814 simplification.
3816 3. The routines in this file.
3819 Long term we want to only have one body of simplification code; to
3820 get to that state I recommend the following steps:
3822 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3823 which are not pass dependent state into these routines.
3825 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3826 use this routine whenever possible.
3828 3. Allow for pass dependent state to be provided to these
3829 routines and add simplifications based on the pass dependent
3830 state. Remove code from cse.c & combine.c that becomes
3831 redundant/dead.
3833 It will take time, but ultimately the compiler will be easier to
3834 maintain and improve. It's totally silly that when we add a
3835 simplification it needs to be added to 4 places (3 for RTL
3836 simplification and 1 for tree simplification). */
3839 simplify_rtx (rtx x)
3841 enum rtx_code code = GET_CODE (x);
3842 enum machine_mode mode = GET_MODE (x);
3844 switch (GET_RTX_CLASS (code))
3846 case RTX_UNARY:
3847 return simplify_unary_operation (code, mode,
3848 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3849 case RTX_COMM_ARITH:
3850 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3851 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3853 /* Fall through.... */
3855 case RTX_BIN_ARITH:
3856 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3858 case RTX_TERNARY:
3859 case RTX_BITFIELD_OPS:
3860 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3861 XEXP (x, 0), XEXP (x, 1),
3862 XEXP (x, 2));
3864 case RTX_COMPARE:
3865 case RTX_COMM_COMPARE:
3866 return simplify_relational_operation (code, mode,
3867 ((GET_MODE (XEXP (x, 0))
3868 != VOIDmode)
3869 ? GET_MODE (XEXP (x, 0))
3870 : GET_MODE (XEXP (x, 1))),
3871 XEXP (x, 0),
3872 XEXP (x, 1));
3874 case RTX_EXTRA:
3875 if (code == SUBREG)
3876 return simplify_gen_subreg (mode, SUBREG_REG (x),
3877 GET_MODE (SUBREG_REG (x)),
3878 SUBREG_BYTE (x));
3879 break;
3881 case RTX_OBJ:
3882 if (code == LO_SUM)
3884 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3885 if (GET_CODE (XEXP (x, 0)) == HIGH
3886 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3887 return XEXP (x, 1);
3889 break;
3891 default:
3892 break;
3894 return NULL;