/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
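
/* For instance, a CONST_INT with value LV stands for the pair
   (LV, HWI_SIGN_EXTEND (LV)): applied to -2 the macro yields
   (HOST_WIDE_INT) -1, applied to 42 it yields 0.  (Illustrative
   values only.)  */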

static rtx neg_const_int (enum machine_mode, rtx);
static bool mode_signbit_p (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

static bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
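
/* E.g. in SImode this recognizes the constant with only bit 31 set
   (value 0x80000000), and in a double-word mode it recognizes the
   CONST_DOUBLE whose low word is zero and whose high word carries the
   sign bit.  (Illustrative.)  */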

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
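
/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, x)
   first swaps the operands so the constant comes second and, when no
   folding applies, returns (plus:SI x (const_int 1)).  (Illustrative;
   "x" stands for some non-constant SImode rtx.)  */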

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
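
/* E.g. a DFmode reference (mem (symbol_ref ...)) into the constant
   pool whose slot holds 1.0 is replaced by the CONST_DOUBLE for 1.0,
   so callers can fold through constant-pool loads.  (Illustrative.)  */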

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
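
/* For example, replacing OLD = (reg A) with NEW = (const_int 4) in
   X = (plus (reg A) (const_int 3)) rebuilds the PLUS and folds it
   to (const_int 7).  (Illustrative.)  */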

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
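
  /* E.g. simplify_unary_operation (ZERO_EXTEND, SImode, GEN_INT (-1),
     QImode) masks to the QImode bits and yields (const_int 255), while
     SIGN_EXTEND of the same operand yields (const_int -1).
     (Illustrative values.)  */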

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
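
  /* E.g. on a host with 32-bit HOST_WIDE_INT, NOT of the DImode
     constant 0 is computed word-by-word as (~0, ~0), i.e. the DImode
     constant -1.  (Illustrative.)  */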

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }
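
  /* So folding (fix:SI (const_double:DF 1.0e10)) saturates to the
     SImode maximum 0x7fffffff, and unsigned_fix of the same value
     saturates to 0xffffffff, matching the middle-end's constant-folding
     semantics.  (Illustrative values.)  */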

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
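
/* E.g. (plus (plus x (const_int 1)) y) is canonicalized here to
   (plus (plus x y) (const_int 1)), keeping the constant outermost so
   later folds can combine it with other constants.  (Illustrative.)  */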

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
              else
                abort ();
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
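
  /* For instance, with 32-bit HOST_WIDE_INT a DImode PLUS of two
     constants is folded here via add_double on their (low, high)
     pairs, so (plus:DI (const_int -1) (const_int 1)) becomes
     (const_int 0).  (Illustrative.)  */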

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_PLUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : 0;
                }
            }
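
          /* E.g. (plus (mult x 2) x) merges the coefficients 2 and 1
             into (mult x 3) when the new form is no more costly than
             the original.  (Illustrative.)  */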

          /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == XOR
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (REG_P (xop00) && REG_P (xop10)
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_MINUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : 0;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;

          /* Canonicalize XOR of the most significant bit to PLUS.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (PLUS, mode, op0, op1);
          /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == PLUS
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, XEXP (op0, 1)))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          /* If we are turning off bits already known off in OP0, we need
             not do an AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
             ((A & N) + B) & M -> (A + B) & M
             Similarly if (N & M) == 0,
             ((A | N) + B) & M -> (A + B) & M
             and for - instead of + and/or ^ instead of |.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ~INTVAL (trueop1)
              && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
            {
              rtx pmop[2];
              int which;

              pmop[0] = XEXP (op0, 0);
              pmop[1] = XEXP (op0, 1);

              for (which = 0; which < 2; which++)
                {
                  tem = pmop[which];
                  switch (GET_CODE (tem))
                    {
                    case AND:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                             == INTVAL (trueop1))
                        pmop[which] = XEXP (tem, 0);
                      break;
                    case IOR:
                    case XOR:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                        pmop[which] = XEXP (tem, 0);
                      break;
                    default:
                      break;
                    }
                }

              if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
                {
                  tem = simplify_gen_binary (GET_CODE (op0), mode,
                                             pmop[0], pmop[1]);
                  return simplify_gen_binary (code, mode, tem, op1);
                }
            }
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            {
              /* Handle narrowing UDIV.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              return op0;
            }
          /* Convert divide by power of two into shift.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
          break;
1969 case DIV:
1970 /* Handle floating point and integers separately. */
1971 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1973 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1974 safe for modes with NaNs, since 0.0 / 0.0 will then be
1975 NaN rather than 0.0. Nor is it safe for modes with signed
1976 zeros, since dividing 0 by a negative number gives -0.0. */
1977 if (trueop0 == CONST0_RTX (mode)
1978 && !HONOR_NANS (mode)
1979 && !HONOR_SIGNED_ZEROS (mode)
1980 && ! side_effects_p (op1))
1981 return op0;
1982 /* x/1.0 is x. */
1983 if (trueop1 == CONST1_RTX (mode)
1984 && !HONOR_SNANS (mode))
1985 return op0;
1987 if (GET_CODE (trueop1) == CONST_DOUBLE
1988 && trueop1 != CONST0_RTX (mode))
1990 REAL_VALUE_TYPE d;
1991 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1993 /* x/-1.0 is -x. */
1994 if (REAL_VALUES_EQUAL (d, dconstm1)
1995 && !HONOR_SNANS (mode))
1996 return simplify_gen_unary (NEG, mode, op0, mode);
1998 /* Change FP division by a constant into multiplication.
1999 Only do this with -funsafe-math-optimizations. */
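/* E.g. x/4.0 becomes x*0.25. The reciprocal is computed at compile
   time and is inexact whenever 1/d is not exactly representable,
   hence the -funsafe-math-optimizations guard. */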
2000 if (flag_unsafe_math_optimizations
2001 && !REAL_VALUES_EQUAL (d, dconst0))
2003 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2004 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2005 return simplify_gen_binary (MULT, mode, op0, tem);
2009 else
2011 /* 0/x is 0 (or x&0 if x has side-effects). */
2012 if (trueop0 == const0_rtx)
2013 return side_effects_p (op1)
2014 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2015 : const0_rtx;
2016 /* x/1 is x. */
2017 if (trueop1 == const1_rtx)
2019 /* Handle narrowing DIV. */
2020 rtx x = gen_lowpart_common (mode, op0);
2021 if (x)
2022 return x;
2023 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2024 return gen_lowpart_SUBREG (mode, op0);
2025 return op0;
2027 /* x/-1 is -x. */
2028 if (trueop1 == constm1_rtx)
2030 rtx x = gen_lowpart_common (mode, op0);
2031 if (!x)
2032 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2033 ? gen_lowpart_SUBREG (mode, op0) : op0;
2034 return simplify_gen_unary (NEG, mode, x, mode);
2037 break;
2039 case UMOD:
2040 /* 0%x is 0 (or x&0 if x has side-effects). */
2041 if (trueop0 == const0_rtx)
2042 return side_effects_p (op1)
2043 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2044 : const0_rtx;
2045 /* x%1 is 0 (or x&0 if x has side-effects). */
2046 if (trueop1 == const1_rtx)
2047 return side_effects_p (op0)
2048 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2049 : const0_rtx;
2050 /* Implement modulus by power of two as AND. */
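/* E.g. (umod x 8) becomes (and x 7): an unsigned value modulo 2**k
   is just its low k bits. */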
2051 if (GET_CODE (trueop1) == CONST_INT
2052 && exact_log2 (INTVAL (trueop1)) > 0)
2053 return simplify_gen_binary (AND, mode, op0,
2054 GEN_INT (INTVAL (op1) - 1));
2055 break;
2057 case MOD:
2058 /* 0%x is 0 (or x&0 if x has side-effects). */
2059 if (trueop0 == const0_rtx)
2060 return side_effects_p (op1)
2061 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2062 : const0_rtx;
2063 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2064 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2065 return side_effects_p (op0)
2066 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2067 : const0_rtx;
2068 break;
2070 case ROTATERT:
2071 case ROTATE:
2072 case ASHIFTRT:
2073 /* Rotating ~0 always results in ~0. */
2074 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2075 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2076 && ! side_effects_p (op1))
2077 return op0;
2079 /* Fall through.... */
2081 case ASHIFT:
2082 case LSHIFTRT:
2083 if (trueop1 == const0_rtx)
2084 return op0;
2085 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2086 return op0;
2087 break;
2089 case SMIN:
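/* If the second operand is the most negative value the mode can
   represent, the signed minimum is always that value. */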
2090 if (width <= HOST_BITS_PER_WIDE_INT
2091 && GET_CODE (trueop1) == CONST_INT
2092 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2093 && ! side_effects_p (op0))
2094 return op1;
2095 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2096 return op0;
2097 tem = simplify_associative_operation (code, mode, op0, op1);
2098 if (tem)
2099 return tem;
2100 break;
2102 case SMAX:
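/* If the second operand is the most positive value the mode can
   represent (GET_MODE_MASK >> 1), the signed maximum is always
   that value. */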
2103 if (width <= HOST_BITS_PER_WIDE_INT
2104 && GET_CODE (trueop1) == CONST_INT
2105 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2106 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2107 && ! side_effects_p (op0))
2108 return op1;
2109 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2110 return op0;
2111 tem = simplify_associative_operation (code, mode, op0, op1);
2112 if (tem)
2113 return tem;
2114 break;
2116 case UMIN:
2117 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2118 return op1;
2119 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2120 return op0;
2121 tem = simplify_associative_operation (code, mode, op0, op1);
2122 if (tem)
2123 return tem;
2124 break;
2126 case UMAX:
2127 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2128 return op1;
2129 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2130 return op0;
2131 tem = simplify_associative_operation (code, mode, op0, op1);
2132 if (tem)
2133 return tem;
2134 break;
2136 case SS_PLUS:
2137 case US_PLUS:
2138 case SS_MINUS:
2139 case US_MINUS:
2140 /* ??? There are simplifications that can be done. */
2141 return 0;
2143 case VEC_SELECT:
2144 if (!VECTOR_MODE_P (mode))
2146 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2147 || (mode
2148 != GET_MODE_INNER (GET_MODE (trueop0)))
2149 || GET_CODE (trueop1) != PARALLEL
2150 || XVECLEN (trueop1, 0) != 1
2151 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
2152 abort ();
2154 if (GET_CODE (trueop0) == CONST_VECTOR)
2155 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2157 else
2159 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2160 || (GET_MODE_INNER (mode)
2161 != GET_MODE_INNER (GET_MODE (trueop0)))
2162 || GET_CODE (trueop1) != PARALLEL)
2163 abort ();
2165 if (GET_CODE (trueop0) == CONST_VECTOR)
2167 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2168 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2169 rtvec v = rtvec_alloc (n_elts);
2170 unsigned int i;
2172 if (XVECLEN (trueop1, 0) != (int) n_elts)
2173 abort ();
2174 for (i = 0; i < n_elts; i++)
2176 rtx x = XVECEXP (trueop1, 0, i);
2178 if (GET_CODE (x) != CONST_INT)
2179 abort ();
2180 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2183 return gen_rtx_CONST_VECTOR (mode, v);
2186 return 0;
2187 case VEC_CONCAT:
2189 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2190 ? GET_MODE (trueop0)
2191 : GET_MODE_INNER (mode));
2192 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2193 ? GET_MODE (trueop1)
2194 : GET_MODE_INNER (mode));
2196 if (!VECTOR_MODE_P (mode)
2197 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2198 != GET_MODE_SIZE (mode)))
2199 abort ();
2201 if ((VECTOR_MODE_P (op0_mode)
2202 && (GET_MODE_INNER (mode)
2203 != GET_MODE_INNER (op0_mode)))
2204 || (!VECTOR_MODE_P (op0_mode)
2205 && GET_MODE_INNER (mode) != op0_mode))
2206 abort ();
2208 if ((VECTOR_MODE_P (op1_mode)
2209 && (GET_MODE_INNER (mode)
2210 != GET_MODE_INNER (op1_mode)))
2211 || (!VECTOR_MODE_P (op1_mode)
2212 && GET_MODE_INNER (mode) != op1_mode))
2213 abort ();
2215 if ((GET_CODE (trueop0) == CONST_VECTOR
2216 || GET_CODE (trueop0) == CONST_INT
2217 || GET_CODE (trueop0) == CONST_DOUBLE)
2218 && (GET_CODE (trueop1) == CONST_VECTOR
2219 || GET_CODE (trueop1) == CONST_INT
2220 || GET_CODE (trueop1) == CONST_DOUBLE))
2222 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2223 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2224 rtvec v = rtvec_alloc (n_elts);
2225 unsigned int i;
2226 unsigned in_n_elts = 1;
2228 if (VECTOR_MODE_P (op0_mode))
2229 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2230 for (i = 0; i < n_elts; i++)
2232 if (i < in_n_elts)
2234 if (!VECTOR_MODE_P (op0_mode))
2235 RTVEC_ELT (v, i) = trueop0;
2236 else
2237 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2239 else
2241 if (!VECTOR_MODE_P (op1_mode))
2242 RTVEC_ELT (v, i) = trueop1;
2243 else
2244 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2245 i - in_n_elts);
2249 return gen_rtx_CONST_VECTOR (mode, v);
2252 return 0;
2254 default:
2255 abort ();
2258 return 0;
2261 /* Get the integer argument values in two forms:
2262 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2264 arg0 = INTVAL (trueop0);
2265 arg1 = INTVAL (trueop1);
2267 if (width < HOST_BITS_PER_WIDE_INT)
2269 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2270 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2272 arg0s = arg0;
2273 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2274 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2276 arg1s = arg1;
2277 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2278 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2280 else
2282 arg0s = arg0;
2283 arg1s = arg1;
2286 /* Compute the value of the arithmetic. */
2288 switch (code)
2290 case PLUS:
2291 val = arg0s + arg1s;
2292 break;
2294 case MINUS:
2295 val = arg0s - arg1s;
2296 break;
2298 case MULT:
2299 val = arg0s * arg1s;
2300 break;
2302 case DIV:
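/* Here, and in the MOD/UDIV/UMOD cases below, punt on division by
   zero and on the most negative value divided by -1, which would
   overflow the host's HOST_WIDE_INT arithmetic. */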
2303 if (arg1s == 0
2304 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2305 && arg1s == -1))
2306 return 0;
2307 val = arg0s / arg1s;
2308 break;
2310 case MOD:
2311 if (arg1s == 0
2312 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2313 && arg1s == -1))
2314 return 0;
2315 val = arg0s % arg1s;
2316 break;
2318 case UDIV:
2319 if (arg1 == 0
2320 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2321 && arg1s == -1))
2322 return 0;
2323 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2324 break;
2326 case UMOD:
2327 if (arg1 == 0
2328 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2329 && arg1s == -1))
2330 return 0;
2331 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2332 break;
2334 case AND:
2335 val = arg0 & arg1;
2336 break;
2338 case IOR:
2339 val = arg0 | arg1;
2340 break;
2342 case XOR:
2343 val = arg0 ^ arg1;
2344 break;
2346 case LSHIFTRT:
2347 /* If shift count is undefined, don't fold it; let the machine do
2348 what it wants. But truncate it if the machine will do that. */
2349 if (arg1 < 0)
2350 return 0;
2352 if (SHIFT_COUNT_TRUNCATED)
2353 arg1 %= width;
2355 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2356 break;
2358 case ASHIFT:
2359 if (arg1 < 0)
2360 return 0;
2362 if (SHIFT_COUNT_TRUNCATED)
2363 arg1 %= width;
2365 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2366 break;
2368 case ASHIFTRT:
2369 if (arg1 < 0)
2370 return 0;
2372 if (SHIFT_COUNT_TRUNCATED)
2373 arg1 %= width;
2375 val = arg0s >> arg1;
2377 /* Bootstrap compiler may not have sign extended the right shift.
2378 Manually extend the sign to ensure bootstrap cc matches gcc. */
2379 if (arg0s < 0 && arg1 > 0)
2380 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2382 break;
2384 case ROTATERT:
2385 if (arg1 < 0)
2386 return 0;
2388 arg1 %= width;
2389 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2390 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2391 break;
2393 case ROTATE:
2394 if (arg1 < 0)
2395 return 0;
2397 arg1 %= width;
2398 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2399 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2400 break;
2402 case COMPARE:
2403 /* Do nothing here. */
2404 return 0;
2406 case SMIN:
2407 val = arg0s <= arg1s ? arg0s : arg1s;
2408 break;
2410 case UMIN:
2411 val = ((unsigned HOST_WIDE_INT) arg0
2412 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2413 break;
2415 case SMAX:
2416 val = arg0s > arg1s ? arg0s : arg1s;
2417 break;
2419 case UMAX:
2420 val = ((unsigned HOST_WIDE_INT) arg0
2421 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2422 break;
2424 case SS_PLUS:
2425 case US_PLUS:
2426 case SS_MINUS:
2427 case US_MINUS:
2428 /* ??? There are simplifications that can be done. */
2429 return 0;
2431 default:
2432 abort ();
2435 val = trunc_int_for_mode (val, mode);
2437 return GEN_INT (val);
2440 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2441 PLUS or MINUS.
2443 Rather than test for specific cases, we do this by a brute-force method
2444 and do all possible simplifications until no more changes occur. Then
2445 we rebuild the operation.
2447 If FORCE is true, then always generate the rtx. This is used to
2448 canonicalize stuff emitted from simplify_gen_binary. Note that this
2449 can still fail if the rtx is too complex. It won't fail just because
2450 the result is not 'simpler' than the input, however. */
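/* For example, (minus (plus a b) (plus b c)) expands into the
   operand list a, b, -b, -c; the pairwise pass cancels b against -b,
   and the result is rebuilt as (minus a c). */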
2452 struct simplify_plus_minus_op_data
2454 rtx op;
2455 int neg;
2458 static int
2459 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2461 const struct simplify_plus_minus_op_data *d1 = p1;
2462 const struct simplify_plus_minus_op_data *d2 = p2;
2464 return (commutative_operand_precedence (d2->op)
2465 - commutative_operand_precedence (d1->op));
2468 static rtx
2469 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2470 rtx op1, int force)
2472 struct simplify_plus_minus_op_data ops[8];
2473 rtx result, tem;
2474 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2475 int first, changed;
2476 int i, j;
2478 memset (ops, 0, sizeof ops);
2480 /* Set up the two operands and then expand them until nothing has been
2481 changed. If we run out of room in our array, give up; this should
2482 almost never happen. */
2484 ops[0].op = op0;
2485 ops[0].neg = 0;
2486 ops[1].op = op1;
2487 ops[1].neg = (code == MINUS);
2491 changed = 0;
2493 for (i = 0; i < n_ops; i++)
2495 rtx this_op = ops[i].op;
2496 int this_neg = ops[i].neg;
2497 enum rtx_code this_code = GET_CODE (this_op);
2499 switch (this_code)
2501 case PLUS:
2502 case MINUS:
2503 if (n_ops == 7)
2504 return NULL_RTX;
2506 ops[n_ops].op = XEXP (this_op, 1);
2507 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2508 n_ops++;
2510 ops[i].op = XEXP (this_op, 0);
2511 input_ops++;
2512 changed = 1;
2513 break;
2515 case NEG:
2516 ops[i].op = XEXP (this_op, 0);
2517 ops[i].neg = ! this_neg;
2518 changed = 1;
2519 break;
2521 case CONST:
2522 if (n_ops < 7
2523 && GET_CODE (XEXP (this_op, 0)) == PLUS
2524 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2525 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2527 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2528 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2529 ops[n_ops].neg = this_neg;
2530 n_ops++;
2531 input_consts++;
2532 changed = 1;
2534 break;
2536 case NOT:
2537 /* ~a -> (-a - 1) */
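/* Two's-complement identity: -a == ~a + 1, hence ~a == -a - 1.
   Record A negated and append a constant -1 term. */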
2538 if (n_ops != 7)
2540 ops[n_ops].op = constm1_rtx;
2541 ops[n_ops++].neg = this_neg;
2542 ops[i].op = XEXP (this_op, 0);
2543 ops[i].neg = !this_neg;
2544 changed = 1;
2546 break;
2548 case CONST_INT:
2549 if (this_neg)
2551 ops[i].op = neg_const_int (mode, this_op);
2552 ops[i].neg = 0;
2553 changed = 1;
2555 break;
2557 default:
2558 break;
2562 while (changed);
2564 /* If we only have two operands, we can't do anything. */
2565 if (n_ops <= 2 && !force)
2566 return NULL_RTX;
2568 /* Count the number of CONSTs we didn't split above. */
2569 for (i = 0; i < n_ops; i++)
2570 if (GET_CODE (ops[i].op) == CONST)
2571 input_consts++;
2573 /* Now simplify each pair of operands until nothing changes. The first
2574 time through just simplify constants against each other. */
2576 first = 1;
2579 changed = first;
2581 for (i = 0; i < n_ops - 1; i++)
2582 for (j = i + 1; j < n_ops; j++)
2584 rtx lhs = ops[i].op, rhs = ops[j].op;
2585 int lneg = ops[i].neg, rneg = ops[j].neg;
2587 if (lhs != 0 && rhs != 0
2588 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2590 enum rtx_code ncode = PLUS;
2592 if (lneg != rneg)
2594 ncode = MINUS;
2595 if (lneg)
2596 tem = lhs, lhs = rhs, rhs = tem;
2598 else if (swap_commutative_operands_p (lhs, rhs))
2599 tem = lhs, lhs = rhs, rhs = tem;
2601 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2603 /* Reject "simplifications" that just wrap the two
2604 arguments in a CONST. Failure to do so can result
2605 in infinite recursion with simplify_binary_operation
2606 when it calls us to simplify CONST operations. */
2607 if (tem
2608 && ! (GET_CODE (tem) == CONST
2609 && GET_CODE (XEXP (tem, 0)) == ncode
2610 && XEXP (XEXP (tem, 0), 0) == lhs
2611 && XEXP (XEXP (tem, 0), 1) == rhs)
2612 /* Don't allow -x + -1 -> ~x simplifications in the
2613 first pass. This allows us the chance to combine
2614 the -1 with other constants. */
2615 && ! (first
2616 && GET_CODE (tem) == NOT
2617 && XEXP (tem, 0) == rhs))
2619 lneg &= rneg;
2620 if (GET_CODE (tem) == NEG)
2621 tem = XEXP (tem, 0), lneg = !lneg;
2622 if (GET_CODE (tem) == CONST_INT && lneg)
2623 tem = neg_const_int (mode, tem), lneg = 0;
2625 ops[i].op = tem;
2626 ops[i].neg = lneg;
2627 ops[j].op = NULL_RTX;
2628 changed = 1;
2633 first = 0;
2635 while (changed);
2637 /* Pack all the operands to the lower-numbered entries. */
2638 for (i = 0, j = 0; j < n_ops; j++)
2639 if (ops[j].op)
2640 ops[i++] = ops[j];
2641 n_ops = i;
2643 /* Sort the operations based on swap_commutative_operands_p. */
2644 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2646 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2647 if (n_ops == 2
2648 && GET_CODE (ops[1].op) == CONST_INT
2649 && CONSTANT_P (ops[0].op)
2650 && ops[0].neg)
2651 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2653 /* We suppressed creation of trivial CONST expressions in the
2654 combination loop to avoid recursion. Create one manually now.
2655 The combination loop should have ensured that there is exactly
2656 one CONST_INT, and the sort will have ensured that it is last
2657 in the array and that any other constant will be next-to-last. */
2659 if (n_ops > 1
2660 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2661 && CONSTANT_P (ops[n_ops - 2].op))
2663 rtx value = ops[n_ops - 1].op;
2664 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2665 value = neg_const_int (mode, value);
2666 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2667 n_ops--;
2670 /* Count the number of CONSTs that we generated. */
2671 n_consts = 0;
2672 for (i = 0; i < n_ops; i++)
2673 if (GET_CODE (ops[i].op) == CONST)
2674 n_consts++;
2676 /* Give up if we didn't reduce the number of operands we had. Make
2677 sure we count a CONST as two operands. If we have the same
2678 number of operands, but have made more CONSTs than before, this
2679 is also an improvement, so accept it. */
2680 if (!force
2681 && (n_ops + n_consts > input_ops
2682 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2683 return NULL_RTX;
2685 /* Put a non-negated operand first, if possible. */
2687 for (i = 0; i < n_ops && ops[i].neg; i++)
2688 continue;
2689 if (i == n_ops)
2690 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2691 else if (i != 0)
2693 tem = ops[0].op;
2694 ops[0] = ops[i];
2695 ops[i].op = tem;
2696 ops[i].neg = 1;
2699 /* Now make the result by performing the requested operations. */
2700 result = ops[0].op;
2701 for (i = 1; i < n_ops; i++)
2702 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2703 mode, result, ops[i].op);
2705 return result;
2708 /* Like simplify_binary_operation except used for relational operators.
2709 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2710 also be VOIDmode.
2712 CMP_MODE specifies the mode in which the comparison is done, so it is
2713 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2714 the operands or, if both are VOIDmode, the operands are compared in
2715 "infinite precision". */
2717 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2718 enum machine_mode cmp_mode, rtx op0, rtx op1)
2720 rtx tem, trueop0, trueop1;
2722 if (cmp_mode == VOIDmode)
2723 cmp_mode = GET_MODE (op0);
2724 if (cmp_mode == VOIDmode)
2725 cmp_mode = GET_MODE (op1);
2727 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2728 if (tem)
2730 #ifdef FLOAT_STORE_FLAG_VALUE
2731 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2733 if (tem == const0_rtx)
2734 return CONST0_RTX (mode);
2735 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2737 REAL_VALUE_TYPE val;
2738 val = FLOAT_STORE_FLAG_VALUE (mode);
2739 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2742 #endif
2744 return tem;
2747 /* For the following tests, ensure const0_rtx is op1. */
2748 if (swap_commutative_operands_p (op0, op1)
2749 || (op0 == const0_rtx && op1 != const0_rtx))
2750 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2752 /* If op0 is a compare, extract the comparison arguments from it. */
2753 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2754 return simplify_relational_operation (code, mode, VOIDmode,
2755 XEXP (op0, 0), XEXP (op0, 1));
2757 if (mode == VOIDmode
2758 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2759 || CC0_P (op0))
2760 return NULL_RTX;
2762 trueop0 = avoid_constant_pool_reference (op0);
2763 trueop1 = avoid_constant_pool_reference (op1);
2764 return simplify_relational_operation_1 (code, mode, cmp_mode,
2765 trueop0, trueop1);
2768 /* This part of simplify_relational_operation is only used when CMP_MODE
2769 is not in class MODE_CC (i.e. it is a real comparison).
2771 MODE is the mode of the result, while CMP_MODE specifies the mode
2772 in which the comparison is done, so it is the mode of the operands. */
2774 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2775 enum machine_mode cmp_mode, rtx op0, rtx op1)
2777 if (GET_CODE (op1) == CONST_INT)
2779 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2781 /* If op0 is a comparison, extract the comparison arguments from it. */
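/* E.g. (ne (lt x y) 0) is just (lt x y), while (eq (lt x y) 0)
   becomes the reversed comparison (ge x y) when that reversal is
   known to be valid. */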
2782 if (code == NE)
2784 if (GET_MODE (op0) == cmp_mode)
2785 return simplify_rtx (op0);
2786 else
2787 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2788 XEXP (op0, 0), XEXP (op0, 1));
2790 else if (code == EQ)
2792 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
2793 if (new != UNKNOWN)
2794 return simplify_gen_relational (new, mode, VOIDmode,
2795 XEXP (op0, 0), XEXP (op0, 1));
2800 return NULL_RTX;
2803 /* Check if the given comparison (done in the given MODE) is actually a
2804 tautology or a contradiction.
2805 If no simplification is possible, this function returns zero.
2806 Otherwise, it returns either const_true_rtx or const0_rtx. */
2809 simplify_const_relational_operation (enum rtx_code code,
2810 enum machine_mode mode,
2811 rtx op0, rtx op1)
2813 int equal, op0lt, op0ltu, op1lt, op1ltu;
2814 rtx tem;
2815 rtx trueop0;
2816 rtx trueop1;
2818 if (mode == VOIDmode
2819 && (GET_MODE (op0) != VOIDmode
2820 || GET_MODE (op1) != VOIDmode))
2821 abort ();
2823 /* If op0 is a compare, extract the comparison arguments from it. */
2824 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2825 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2827 /* We can't simplify MODE_CC values since we don't know what the
2828 actual comparison is. */
2829 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2830 return 0;
2832 /* Make sure the constant is second. */
2833 if (swap_commutative_operands_p (op0, op1))
2835 tem = op0, op0 = op1, op1 = tem;
2836 code = swap_condition (code);
2839 trueop0 = avoid_constant_pool_reference (op0);
2840 trueop1 = avoid_constant_pool_reference (op1);
2842 /* For integer comparisons of A and B maybe we can simplify A - B and can
2843 then simplify a comparison of that with zero. If A and B are both either
2844 a register or a CONST_INT, this can't help; testing for these cases will
2845 prevent infinite recursion here and speed things up.
2847 If CODE is an unsigned comparison, then we can never do this optimization,
2848 because it gives an incorrect result if the subtraction wraps around zero.
2849 ANSI C defines unsigned operations such that they never overflow, and
2850 thus such cases cannot be ignored; but we cannot do it even for
2851 signed comparisons for languages such as Java, so test flag_wrapv. */
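/* E.g. in (gt (plus x 4) x) the difference simplifies to
   (const_int 4), and (gt 4 0) is known true; the unsigned form (gtu)
   must not be folded this way, since x + 4 may wrap around below x. */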
2853 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2854 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2855 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2856 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2857 /* We cannot do this for == or != if tem is a nonzero address. */
2858 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2859 && code != GTU && code != GEU && code != LTU && code != LEU)
2860 return simplify_const_relational_operation (signed_condition (code),
2861 mode, tem, const0_rtx);
2863 if (flag_unsafe_math_optimizations && code == ORDERED)
2864 return const_true_rtx;
2866 if (flag_unsafe_math_optimizations && code == UNORDERED)
2867 return const0_rtx;
2869 /* For modes without NaNs, if the two operands are equal, we know the
2870 result except if they have side-effects. */
2871 if (! HONOR_NANS (GET_MODE (trueop0))
2872 && rtx_equal_p (trueop0, trueop1)
2873 && ! side_effects_p (trueop0))
2874 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2876 /* If the operands are floating-point constants, see if we can fold
2877 the result. */
2878 else if (GET_CODE (trueop0) == CONST_DOUBLE
2879 && GET_CODE (trueop1) == CONST_DOUBLE
2880 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2882 REAL_VALUE_TYPE d0, d1;
2884 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2885 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2887 /* Comparisons are unordered iff at least one of the values is NaN. */
2888 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2889 switch (code)
2891 case UNEQ:
2892 case UNLT:
2893 case UNGT:
2894 case UNLE:
2895 case UNGE:
2896 case NE:
2897 case UNORDERED:
2898 return const_true_rtx;
2899 case EQ:
2900 case LT:
2901 case GT:
2902 case LE:
2903 case GE:
2904 case LTGT:
2905 case ORDERED:
2906 return const0_rtx;
2907 default:
2908 return 0;
2911 equal = REAL_VALUES_EQUAL (d0, d1);
2912 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2913 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2916 /* Otherwise, see if the operands are both integers. */
2917 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2918 && (GET_CODE (trueop0) == CONST_DOUBLE
2919 || GET_CODE (trueop0) == CONST_INT)
2920 && (GET_CODE (trueop1) == CONST_DOUBLE
2921 || GET_CODE (trueop1) == CONST_INT))
2923 int width = GET_MODE_BITSIZE (mode);
2924 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2925 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2927 /* Get the two words comprising each integer constant. */
2928 if (GET_CODE (trueop0) == CONST_DOUBLE)
2930 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2931 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2933 else
2935 l0u = l0s = INTVAL (trueop0);
2936 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2939 if (GET_CODE (trueop1) == CONST_DOUBLE)
2941 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2942 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2944 else
2946 l1u = l1s = INTVAL (trueop1);
2947 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2950 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2951 we have to sign or zero-extend the values. */
2952 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2954 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2955 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2957 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2958 l0s |= ((HOST_WIDE_INT) (-1) << width);
2960 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2961 l1s |= ((HOST_WIDE_INT) (-1) << width);
2963 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2964 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2966 equal = (h0u == h1u && l0u == l1u);
2967 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2968 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2969 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2970 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2973 /* Otherwise, there are some code-specific tests we can make. */
2974 else
2976 /* Optimize comparisons with upper and lower bounds. */
2977 if (SCALAR_INT_MODE_P (mode)
2978 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2980 rtx mmin, mmax;
2981 int sign;
2983 if (code == GEU
2984 || code == LEU
2985 || code == GTU
2986 || code == LTU)
2987 sign = 0;
2988 else
2989 sign = 1;
2991 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
2993 tem = NULL_RTX;
2994 switch (code)
2996 case GEU:
2997 case GE:
2998 /* x >= min is always true. */
2999 if (rtx_equal_p (trueop1, mmin))
3000 tem = const_true_rtx;
3001 else
3002 break;
3004 case LEU:
3005 case LE:
3006 /* x <= max is always true. */
3007 if (rtx_equal_p (trueop1, mmax))
3008 tem = const_true_rtx;
3009 break;
3011 case GTU:
3012 case GT:
3013 /* x > max is always false. */
3014 if (rtx_equal_p (trueop1, mmax))
3015 tem = const0_rtx;
3016 break;
3018 case LTU:
3019 case LT:
3020 /* x < min is always false. */
3021 if (rtx_equal_p (trueop1, mmin))
3022 tem = const0_rtx;
3023 break;
3025 default:
3026 break;
3028 if (tem == const0_rtx
3029 || tem == const_true_rtx)
3030 return tem;
3033 switch (code)
3035 case EQ:
3036 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3037 return const0_rtx;
3038 break;
3040 case NE:
3041 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3042 return const_true_rtx;
3043 break;
3045 case LT:
3046 /* Optimize abs(x) < 0.0. */
3047 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3049 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3050 : trueop0;
3051 if (GET_CODE (tem) == ABS)
3052 return const0_rtx;
3054 break;
3056 case GE:
3057 /* Optimize abs(x) >= 0.0. */
3058 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3060 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3061 : trueop0;
3062 if (GET_CODE (tem) == ABS)
3063 return const_true_rtx;
3065 break;
3067 case UNGE:
3068 /* Optimize ! (abs(x) < 0.0). */
3069 if (trueop1 == CONST0_RTX (mode))
3071 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3072 : trueop0;
3073 if (GET_CODE (tem) == ABS)
3074 return const_true_rtx;
3076 break;
3078 default:
3079 break;
3082 return 0;
3085 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3086 as appropriate. */
3087 switch (code)
3089 case EQ:
3090 case UNEQ:
3091 return equal ? const_true_rtx : const0_rtx;
3092 case NE:
3093 case LTGT:
3094 return ! equal ? const_true_rtx : const0_rtx;
3095 case LT:
3096 case UNLT:
3097 return op0lt ? const_true_rtx : const0_rtx;
3098 case GT:
3099 case UNGT:
3100 return op1lt ? const_true_rtx : const0_rtx;
3101 case LTU:
3102 return op0ltu ? const_true_rtx : const0_rtx;
3103 case GTU:
3104 return op1ltu ? const_true_rtx : const0_rtx;
3105 case LE:
3106 case UNLE:
3107 return equal || op0lt ? const_true_rtx : const0_rtx;
3108 case GE:
3109 case UNGE:
3110 return equal || op1lt ? const_true_rtx : const0_rtx;
3111 case LEU:
3112 return equal || op0ltu ? const_true_rtx : const0_rtx;
3113 case GEU:
3114 return equal || op1ltu ? const_true_rtx : const0_rtx;
3115 case ORDERED:
3116 return const_true_rtx;
3117 case UNORDERED:
3118 return const0_rtx;
3119 default:
3120 abort ();
3124 /* Simplify CODE, an operation with result mode MODE and three operands,
3125 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3126 a constant. Return 0 if no simplification is possible. */
3129 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3130 enum machine_mode op0_mode, rtx op0, rtx op1,
3131 rtx op2)
3133 unsigned int width = GET_MODE_BITSIZE (mode);
3135 /* VOIDmode means "infinite" precision. */
3136 if (width == 0)
3137 width = HOST_BITS_PER_WIDE_INT;
3139 switch (code)
3141 case SIGN_EXTRACT:
3142 case ZERO_EXTRACT:
3143 if (GET_CODE (op0) == CONST_INT
3144 && GET_CODE (op1) == CONST_INT
3145 && GET_CODE (op2) == CONST_INT
3146 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3147 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3149 /* Extracting a bit-field from a constant */
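/* E.g. with little-endian bit numbering, extracting 4 bits at
   position 2 from 0x5c yields (0x5c >> 2) & 0xf == 0x7;
   SIGN_EXTRACT would additionally propagate bit 3 of that field. */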
3150 HOST_WIDE_INT val = INTVAL (op0);
3152 if (BITS_BIG_ENDIAN)
3153 val >>= (GET_MODE_BITSIZE (op0_mode)
3154 - INTVAL (op2) - INTVAL (op1));
3155 else
3156 val >>= INTVAL (op2);
3158 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3160 /* First zero-extend. */
3161 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3162 /* If desired, propagate sign bit. */
3163 if (code == SIGN_EXTRACT
3164 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3165 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3168 /* Clear the bits that don't belong in our mode,
3169 unless they and our sign bit are all one.
3170 So we get either a reasonable negative value or a reasonable
3171 unsigned value for this mode. */
3172 if (width < HOST_BITS_PER_WIDE_INT
3173 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3174 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3175 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3177 return GEN_INT (val);
3179 break;
3181 case IF_THEN_ELSE:
3182 if (GET_CODE (op0) == CONST_INT)
3183 return op0 != const0_rtx ? op1 : op2;
3185 /* Convert c ? a : a into "a". */
3186 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3187 return op1;
3189 /* Convert a != b ? a : b into "a". */
3190 if (GET_CODE (op0) == NE
3191 && ! side_effects_p (op0)
3192 && ! HONOR_NANS (mode)
3193 && ! HONOR_SIGNED_ZEROS (mode)
3194 && ((rtx_equal_p (XEXP (op0, 0), op1)
3195 && rtx_equal_p (XEXP (op0, 1), op2))
3196 || (rtx_equal_p (XEXP (op0, 0), op2)
3197 && rtx_equal_p (XEXP (op0, 1), op1))))
3198 return op1;
3200 /* Convert a == b ? a : b into "b". */
3201 if (GET_CODE (op0) == EQ
3202 && ! side_effects_p (op0)
3203 && ! HONOR_NANS (mode)
3204 && ! HONOR_SIGNED_ZEROS (mode)
3205 && ((rtx_equal_p (XEXP (op0, 0), op1)
3206 && rtx_equal_p (XEXP (op0, 1), op2))
3207 || (rtx_equal_p (XEXP (op0, 0), op2)
3208 && rtx_equal_p (XEXP (op0, 1), op1))))
3209 return op2;
3211 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3213 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3214 ? GET_MODE (XEXP (op0, 1))
3215 : GET_MODE (XEXP (op0, 0)));
3216 rtx temp;
3218 /* Look for happy constants in op1 and op2. */
3219 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3221 HOST_WIDE_INT t = INTVAL (op1);
3222 HOST_WIDE_INT f = INTVAL (op2);
3224 if (t == STORE_FLAG_VALUE && f == 0)
3225 code = GET_CODE (op0);
3226 else if (t == 0 && f == STORE_FLAG_VALUE)
3228 enum rtx_code tmp;
3229 tmp = reversed_comparison_code (op0, NULL_RTX);
3230 if (tmp == UNKNOWN)
3231 break;
3232 code = tmp;
3234 else
3235 break;
3237 return simplify_gen_relational (code, mode, cmp_mode,
3238 XEXP (op0, 0), XEXP (op0, 1));
3241 if (cmp_mode == VOIDmode)
3242 cmp_mode = op0_mode;
3243 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3244 cmp_mode, XEXP (op0, 0),
3245 XEXP (op0, 1));
3247 /* See if any simplifications were possible. */
3248 if (temp)
3250 if (GET_CODE (temp) == CONST_INT)
3251 return temp == const0_rtx ? op2 : op1;
3252 else if (temp)
3253 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3256 break;
3258 case VEC_MERGE:
3259 if (GET_MODE (op0) != mode
3260 || GET_MODE (op1) != mode
3261 || !VECTOR_MODE_P (mode))
3262 abort ();
3263 op2 = avoid_constant_pool_reference (op2);
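/* OP2 is a bitmask: bit I set selects element I of OP0, bit I clear
   selects element I of OP1, so an all-zero mask yields OP1 and an
   all-ones mask yields OP0. */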
3264 if (GET_CODE (op2) == CONST_INT)
3266 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3267 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3268 int mask = (1 << n_elts) - 1;
3270 if (!(INTVAL (op2) & mask))
3271 return op1;
3272 if ((INTVAL (op2) & mask) == mask)
3273 return op0;
3275 op0 = avoid_constant_pool_reference (op0);
3276 op1 = avoid_constant_pool_reference (op1);
3277 if (GET_CODE (op0) == CONST_VECTOR
3278 && GET_CODE (op1) == CONST_VECTOR)
3280 rtvec v = rtvec_alloc (n_elts);
3281 unsigned int i;
3283 for (i = 0; i < n_elts; i++)
3284 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3285 ? CONST_VECTOR_ELT (op0, i)
3286 : CONST_VECTOR_ELT (op1, i));
3287 return gen_rtx_CONST_VECTOR (mode, v);
3290 break;
3292 default:
3293 abort ();
3296 return 0;
3299 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3300 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3302 Works by unpacking OP into a collection of 8-bit values
3303 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3304 and then repacking them again for OUTERMODE. */
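/* For instance, the SImode subreg at byte 4 of a DImode constant is
   computed by unpacking the constant into eight byte-sized values,
   selecting the four that start at (endian-adjusted) byte 4, and
   repacking those as a CONST_INT. */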
3306 static rtx
3307 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3308 enum machine_mode innermode, unsigned int byte)
3310 /* We support up to 512-bit values (for V8DFmode). */
3311 enum {
3312 max_bitsize = 512,
3313 value_bit = 8,
3314 value_mask = (1 << value_bit) - 1
3316 unsigned char value[max_bitsize / value_bit];
3317 int value_start;
3318 int i;
3319 int elem;
3321 int num_elem;
3322 rtx * elems;
3323 int elem_bitsize;
3324 rtx result_s;
3325 rtvec result_v = NULL;
3326 enum mode_class outer_class;
3327 enum machine_mode outer_submode;
3329 /* Some ports misuse CCmode. */
3330 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3331 return op;
3333 /* Unpack the value. */
3335 if (GET_CODE (op) == CONST_VECTOR)
3337 num_elem = CONST_VECTOR_NUNITS (op);
3338 elems = &CONST_VECTOR_ELT (op, 0);
3339 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3341 else
3343 num_elem = 1;
3344 elems = &op;
3345 elem_bitsize = max_bitsize;
3348 if (BITS_PER_UNIT % value_bit != 0)
3349 abort (); /* Too complicated; reducing value_bit may help. */
3350 if (elem_bitsize % BITS_PER_UNIT != 0)
3351 abort (); /* I don't know how to handle endianness of sub-units. */
3353 for (elem = 0; elem < num_elem; elem++)
3355 unsigned char * vp;
3356 rtx el = elems[elem];
3358 /* Vectors are kept in target memory order. (This is probably
3359 a mistake.) */
3361 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3362 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3363 / BITS_PER_UNIT);
3364 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3365 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3366 unsigned bytele = (subword_byte % UNITS_PER_WORD
3367 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3368 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3371 switch (GET_CODE (el))
3373 case CONST_INT:
3374 for (i = 0;
3375 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3376 i += value_bit)
3377 *vp++ = INTVAL (el) >> i;
3378 /* CONST_INTs are always logically sign-extended. */
3379 for (; i < elem_bitsize; i += value_bit)
3380 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3381 break;
3383 case CONST_DOUBLE:
3384 if (GET_MODE (el) == VOIDmode)
3386 /* If this triggers, someone should have generated a
3387 CONST_INT instead. */
3388 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3389 abort ();
3391 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3392 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3393 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3395 *vp++
3396 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3397 i += value_bit;
3399 /* It shouldn't matter what's done here, so fill it with
3400 zero. */
3401 for (; i < max_bitsize; i += value_bit)
3402 *vp++ = 0;
3404 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3406 long tmp[max_bitsize / 32];
3407 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3409 if (bitsize > elem_bitsize)
3410 abort ();
3411 if (bitsize % value_bit != 0)
3412 abort ();
3414 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3415 GET_MODE (el));
3417 /* real_to_target produces its result in words affected by
3418 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3419 and use WORDS_BIG_ENDIAN instead; see the documentation
3420 of SUBREG in rtl.texi. */
3421 for (i = 0; i < bitsize; i += value_bit)
3423 int ibase;
3424 if (WORDS_BIG_ENDIAN)
3425 ibase = bitsize - 1 - i;
3426 else
3427 ibase = i;
3428 *vp++ = tmp[ibase / 32] >> i % 32;
3431 /* It shouldn't matter what's done here, so fill it with
3432 zero. */
3433 for (; i < elem_bitsize; i += value_bit)
3434 *vp++ = 0;
3436 else
3437 abort ();
3438 break;
3440 default:
3441 abort ();
3445 /* Now, pick the right byte to start with. */
3446 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3447 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3448 will already have offset 0. */
3449 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3451 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3452 - byte);
3453 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3454 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3455 byte = (subword_byte % UNITS_PER_WORD
3456 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3459 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3460 so if it's become negative it will instead be very large.) */
3461 if (byte >= GET_MODE_SIZE (innermode))
3462 abort ();
3464 /* Convert from bytes to chunks of size value_bit. */
3465 value_start = byte * (BITS_PER_UNIT / value_bit);
3467 /* Re-pack the value. */
3469 if (VECTOR_MODE_P (outermode))
3471 num_elem = GET_MODE_NUNITS (outermode);
3472 result_v = rtvec_alloc (num_elem);
3473 elems = &RTVEC_ELT (result_v, 0);
3474 outer_submode = GET_MODE_INNER (outermode);
3476 else
3478 num_elem = 1;
3479 elems = &result_s;
3480 outer_submode = outermode;
3483 outer_class = GET_MODE_CLASS (outer_submode);
3484 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3486 if (elem_bitsize % value_bit != 0)
3487 abort ();
3488 if (elem_bitsize + value_start * value_bit > max_bitsize)
3489 abort ();
3491 for (elem = 0; elem < num_elem; elem++)
3493 unsigned char *vp;
3495 /* Vectors are stored in target memory order. (This is probably
3496 a mistake.) */
3498 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3499 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3500 / BITS_PER_UNIT);
3501 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3502 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3503 unsigned bytele = (subword_byte % UNITS_PER_WORD
3504 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3505 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3508 switch (outer_class)
3510 case MODE_INT:
3511 case MODE_PARTIAL_INT:
3513 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3515 for (i = 0;
3516 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3517 i += value_bit)
3518 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3519 for (; i < elem_bitsize; i += value_bit)
3520 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3521 << (i - HOST_BITS_PER_WIDE_INT));
3523 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3524 know why. */
3525 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3526 elems[elem] = gen_int_mode (lo, outer_submode);
3527 else
3528 elems[elem] = immed_double_const (lo, hi, outer_submode);
3530 break;
3532 case MODE_FLOAT:
3534 REAL_VALUE_TYPE r;
3535 long tmp[max_bitsize / 32];
3537 /* real_from_target wants its input in words affected by
3538 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3539 and use WORDS_BIG_ENDIAN instead; see the documentation
3540 of SUBREG in rtl.texi. */
3541 for (i = 0; i < max_bitsize / 32; i++)
3542 tmp[i] = 0;
3543 for (i = 0; i < elem_bitsize; i += value_bit)
3545 int ibase;
3546 if (WORDS_BIG_ENDIAN)
3547 ibase = elem_bitsize - 1 - i;
3548 else
3549 ibase = i;
3550 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3553 real_from_target (&r, tmp, outer_submode);
3554 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3556 break;
3558 default:
3559 abort ();
3562 if (VECTOR_MODE_P (outermode))
3563 return gen_rtx_CONST_VECTOR (outermode, result_v);
3564 else
3565 return result_s;
3568 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3569 Return 0 if no simplifications are possible. */
3571 simplify_subreg (enum machine_mode outermode, rtx op,
3572 enum machine_mode innermode, unsigned int byte)
3574 /* Little bit of sanity checking. */
3575 if (innermode == VOIDmode || outermode == VOIDmode
3576 || innermode == BLKmode || outermode == BLKmode)
3577 abort ();
3579 if (GET_MODE (op) != innermode
3580 && GET_MODE (op) != VOIDmode)
3581 abort ();
3583 if (byte % GET_MODE_SIZE (outermode)
3584 || byte >= GET_MODE_SIZE (innermode))
3585 abort ();
3587 if (outermode == innermode && !byte)
3588 return op;
3590 if (GET_CODE (op) == CONST_INT
3591 || GET_CODE (op) == CONST_DOUBLE
3592 || GET_CODE (op) == CONST_VECTOR)
3593 return simplify_immed_subreg (outermode, op, innermode, byte);
3595 /* Changing mode twice with SUBREG => just change it once,
3596 or not at all if changing back to op's starting mode. */
3597 if (GET_CODE (op) == SUBREG)
3599 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3600 int final_offset = byte + SUBREG_BYTE (op);
3601 rtx new;
3603 if (outermode == innermostmode
3604 && byte == 0 && SUBREG_BYTE (op) == 0)
3605 return SUBREG_REG (op);
3607 /* The SUBREG_BYTE represents offset, as if the value were stored
3608 in memory. Irritating exception is paradoxical subreg, where
3609 we define SUBREG_BYTE to be 0. On big endian machines, this
3610 value should be negative. For a moment, undo this exception. */
3611 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3613 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3614 if (WORDS_BIG_ENDIAN)
3615 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3616 if (BYTES_BIG_ENDIAN)
3617 final_offset += difference % UNITS_PER_WORD;
3619 if (SUBREG_BYTE (op) == 0
3620 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3622 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3623 if (WORDS_BIG_ENDIAN)
3624 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3625 if (BYTES_BIG_ENDIAN)
3626 final_offset += difference % UNITS_PER_WORD;
3629 /* See whether resulting subreg will be paradoxical. */
3630 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3632 /* In nonparadoxical subregs we can't handle negative offsets. */
3633 if (final_offset < 0)
3634 return NULL_RTX;
3635 /* Bail out in case resulting subreg would be incorrect. */
3636 if (final_offset % GET_MODE_SIZE (outermode)
3637 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3638 return NULL_RTX;
3640 else
3642 int offset = 0;
3643 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3645 /* In a paradoxical subreg, see if we are still looking at the lower part.
3646 If so, our SUBREG_BYTE will be 0. */
3647 if (WORDS_BIG_ENDIAN)
3648 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3649 if (BYTES_BIG_ENDIAN)
3650 offset += difference % UNITS_PER_WORD;
3651 if (offset == final_offset)
3652 final_offset = 0;
3653 else
3654 return NULL_RTX;
3657 /* Recurse for further possible simplifications. */
3658 new = simplify_subreg (outermode, SUBREG_REG (op),
3659 GET_MODE (SUBREG_REG (op)),
3660 final_offset);
3661 if (new)
3662 return new;
3663 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3666 /* SUBREG of a hard register => just change the register number
3667 and/or mode. If the hard register is not valid in that mode,
3668 suppress this simplification. If the hard register is the stack,
3669 frame, or argument pointer, leave this as a SUBREG. */
3671 if (REG_P (op)
3672 && (! REG_FUNCTION_VALUE_P (op)
3673 || ! rtx_equal_function_value_matters)
3674 && REGNO (op) < FIRST_PSEUDO_REGISTER
3675 #ifdef CANNOT_CHANGE_MODE_CLASS
3676 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3677 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3678 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3679 #endif
3680 && ((reload_completed && !frame_pointer_needed)
3681 || (REGNO (op) != FRAME_POINTER_REGNUM
3682 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3683 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3684 #endif
3686 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3687 && REGNO (op) != ARG_POINTER_REGNUM
3688 #endif
3689 && REGNO (op) != STACK_POINTER_REGNUM
3690 && subreg_offset_representable_p (REGNO (op), innermode,
3691 byte, outermode))
3693 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3694 int final_regno = subreg_hard_regno (tem, 0);
3696 /* ??? We do allow it if the current REG is not valid for
3697 its mode. This is a kludge to work around how float/complex
3698 arguments are passed on 32-bit SPARC and should be fixed. */
3699 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3700 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3702 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3704 /* Propagate original regno. We don't have any way to specify
3705 the offset inside original regno, so do so only for lowpart.
3706 The information is used only by alias analysis, which cannot
3707 grok partial registers anyway. */
3709 if (subreg_lowpart_offset (outermode, innermode) == byte)
3710 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3711 return x;
3715 /* If we have a SUBREG of a register that we are replacing and we are
3716 replacing it with a MEM, make a new MEM and try replacing the
3717 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3718 or if we would be widening it. */
3720 if (MEM_P (op)
3721 && ! mode_dependent_address_p (XEXP (op, 0))
3722 /* Allow splitting of volatile memory references in case we don't
3723 have an instruction to move the whole thing. */
3724 && (! MEM_VOLATILE_P (op)
3725 || ! have_insn_for (SET, innermode))
3726 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3727 return adjust_address_nv (op, outermode, byte);
3729 /* Handle complex values represented as CONCAT
3730 of real and imaginary part. */
3731 if (GET_CODE (op) == CONCAT)
3733 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3734 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3735 unsigned int final_offset;
3736 rtx res;
3738 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3739 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3740 if (res)
3741 return res;
3742 /* We can at least simplify it by referring directly to the
3743 relevant part. */
3744 return gen_rtx_SUBREG (outermode, part, final_offset);
3747 /* Optimize SUBREG truncations of zero and sign extended values. */
3748 if ((GET_CODE (op) == ZERO_EXTEND
3749 || GET_CODE (op) == SIGN_EXTEND)
3750 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3752 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3754 /* If we're requesting the lowpart of a zero or sign extension,
3755 there are three possibilities. If the outermode is the same
3756 as the origmode, we can omit both the extension and the subreg.
3757 If the outermode is not larger than the origmode, we can apply
3758 the truncation without the extension. Finally, if the outermode
3759 is larger than the origmode, but both are integer modes, we
3760 can just extend to the appropriate mode. */
3761 if (bitpos == 0)
3763 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3764 if (outermode == origmode)
3765 return XEXP (op, 0);
3766 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3767 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3768 subreg_lowpart_offset (outermode,
3769 origmode));
3770 if (SCALAR_INT_MODE_P (outermode))
3771 return simplify_gen_unary (GET_CODE (op), outermode,
3772 XEXP (op, 0), origmode);
3775 /* A SUBREG resulting from a zero extension may fold to zero if
3776 it extracts higher bits than the ZERO_EXTEND's source bits. */
3777 if (GET_CODE (op) == ZERO_EXTEND
3778 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3779 return CONST0_RTX (outermode);
3782 return NULL_RTX;
3785 /* Make a SUBREG operation or equivalent if it folds. */
3788 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3789 enum machine_mode innermode, unsigned int byte)
3791 rtx new;
3792 /* Little bit of sanity checking. */
3793 if (innermode == VOIDmode || outermode == VOIDmode
3794 || innermode == BLKmode || outermode == BLKmode)
3795 abort ();
3797 if (GET_MODE (op) != innermode
3798 && GET_MODE (op) != VOIDmode)
3799 abort ();
3801 if (byte % GET_MODE_SIZE (outermode)
3802 || byte >= GET_MODE_SIZE (innermode))
3803 abort ();
3805 new = simplify_subreg (outermode, op, innermode, byte);
3806 if (new)
3807 return new;
3809 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3810 return NULL_RTX;
3812 return gen_rtx_SUBREG (outermode, op, byte);
3814 /* Simplify X, an rtx expression.
3816 Return the simplified expression or NULL if no simplifications
3817 were possible.
3819 This is the preferred entry point into the simplification routines;
3820 however, we still allow passes to call the more specific routines.
3822 Right now GCC has three (yes, three) major bodies of RTL simplification
3823 code that need to be unified.
3825 1. fold_rtx in cse.c. This code uses various CSE specific
3826 information to aid in RTL simplification.
3828 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3829 it uses combine specific information to aid in RTL
3830 simplification.
3832 3. The routines in this file.
3835 Long term we want to only have one body of simplification code; to
3836 get to that state I recommend the following steps:
3838 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3839 which are not pass dependent state into these routines.
3841 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3842 use this routine whenever possible.
3844 3. Allow for pass dependent state to be provided to these
3845 routines and add simplifications based on the pass dependent
3846 state. Remove code from cse.c & combine.c that becomes
3847 redundant/dead.
3849 It will take time, but ultimately the compiler will be easier to
3850 maintain and improve. It's totally silly that when we add a
3851 simplification it needs to be added to 4 places (3 for RTL
3852 simplification and 1 for tree simplification). */
3855 simplify_rtx (rtx x)
3857 enum rtx_code code = GET_CODE (x);
3858 enum machine_mode mode = GET_MODE (x);
3860 switch (GET_RTX_CLASS (code))
3862 case RTX_UNARY:
3863 return simplify_unary_operation (code, mode,
3864 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3865 case RTX_COMM_ARITH:
3866 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3867 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3869 /* Fall through.... */
3871 case RTX_BIN_ARITH:
3872 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3874 case RTX_TERNARY:
3875 case RTX_BITFIELD_OPS:
3876 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3877 XEXP (x, 0), XEXP (x, 1),
3878 XEXP (x, 2));
3880 case RTX_COMPARE:
3881 case RTX_COMM_COMPARE:
3882 return simplify_relational_operation (code, mode,
3883 ((GET_MODE (XEXP (x, 0))
3884 != VOIDmode)
3885 ? GET_MODE (XEXP (x, 0))
3886 : GET_MODE (XEXP (x, 1))),
3887 XEXP (x, 0),
3888 XEXP (x, 1));
3890 case RTX_EXTRA:
3891 if (code == SUBREG)
3892 return simplify_gen_subreg (mode, SUBREG_REG (x),
3893 GET_MODE (SUBREG_REG (x)),
3894 SUBREG_BYTE (x));
3895 break;
3897 case RTX_OBJ:
3898 if (code == LO_SUM)
3900 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3901 if (GET_CODE (XEXP (x, 0)) == HIGH
3902 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3903 return XEXP (x, 1);
3905 break;
3907 default:
3908 break;
3910 return NULL;