/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
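
/* For illustration (assuming a 64-bit HOST_WIDE_INT): a double-wide -2
   is carried as the pair (low = 0xfffffffffffffffe, high =
   HWI_SIGN_EXTEND (-2) = -1), while 2 becomes (low = 2, high =
   HWI_SIGN_EXTEND (2) = 0).  */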

static rtx neg_const_int (enum machine_mode, rtx);
static bool mode_signbit_p (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
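
/* Note the truncation at work: in QImode, negating (const_int -128)
   yields 128, which gen_int_mode truncates back into the mode, giving
   (const_int -128) again.  */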

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

static bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
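
/* For example, with 32-bit SImode this returns true exactly for the
   constant 0x80000000: any sign-extended high bits from the host
   representation are first masked off, and what remains must be the
   single bit 1 << 31.  */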

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
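
/* So, for instance, simplify_gen_binary (PLUS, SImode, (const_int 1),
   (reg)) first swaps the operands to put the constant second, giving
   the canonical form (plus (reg) (const_int 1)).  */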

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old_rtx) && REGNO (x) == REGNO (old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
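
/* For example, substituting (const_int 2) for (reg R) in
   (plus:SI (reg R) (const_int 1)) rebuilds the PLUS through
   simplify_gen_binary, which folds it straight to (const_int 3).  */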

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
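
  /* As a worked example of the SIGN_EXTEND arithmetic above: extending
     the QImode value 0xff first masks to val = 0xff, then, since bit 7
     is set, subtracts 1 << 8, giving val = -1 as expected.  */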

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            /* Without this break the NOT case would fall through into
               the default abort below, losing the folded value.  */
            break;
          }
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }
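
  /* To illustrate the saturating behavior implemented above: folding
     (fix:SI (const_double 1e10)) clamps to the SImode maximum
     2147483647, the same value through unsigned_fix:SI clamps to
     4294967295, and a NaN operand folds to 0 in either case.  */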

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
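
/* For example, given (plus (plus (reg) (const_int 1)) (const_int 2)),
   the "(a op b) op c as a op (b op c)" step folds the two constants,
   so the whole expression becomes (plus (reg) (const_int 3)).  */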

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
              else
                abort ();
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_PLUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : 0;
                }
            }

          /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == XOR
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (REG_P (xop00) && REG_P (xop10)
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_MINUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : 0;
                }
            }
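
          /* For instance, (minus (mult x (const_int 3)) x) is picked up
             here with coeff0 = 3 and coeff1 = 1 and is rewritten as a
             single multiplication by 2 (which the MULT rules may further
             turn into a shift), provided the result is no more costly
             than the original expression.  */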

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
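
          /* A quick sanity check of the (x - (x & y)) -> (x & ~y) rule
             with x = 0b1100 and y = 0b1010: x - (x & y) = 12 - 8 = 4,
             and x & ~y = 0b1100 & 0b0101 = 0b0100 = 4 as well.  */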
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1))
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;

          /* Canonicalize XOR of the most significant bit to PLUS.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (PLUS, mode, op0, op1);
          /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == PLUS
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, XEXP (op0, 1)))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          /* If we are turning off bits already known off in OP0, we need
             not do an AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
             ((A & N) + B) & M -> (A + B) & M
             Similarly if (N & M) == 0,
             ((A | N) + B) & M -> (A + B) & M
             and for - instead of + and/or ^ instead of |.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ~INTVAL (trueop1)
              && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
            {
              rtx pmop[2];
              int which;

              pmop[0] = XEXP (op0, 0);
              pmop[1] = XEXP (op0, 1);

              for (which = 0; which < 2; which++)
                {
                  tem = pmop[which];
                  switch (GET_CODE (tem))
                    {
                    case AND:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                             == INTVAL (trueop1))
                        pmop[which] = XEXP (tem, 0);
                      break;
                    case IOR:
                    case XOR:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                        pmop[which] = XEXP (tem, 0);
                      break;
                    default:
                      break;
                    }
                }

              if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
                {
                  tem = simplify_gen_binary (GET_CODE (op0), mode,
                                             pmop[0], pmop[1]);
                  return simplify_gen_binary (code, mode, tem, op1);
                }
            }
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            {
              /* Handle narrowing UDIV.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              return op0;
            }
          /* Convert divide by power of two into shift.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
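          /* For example, an unsigned division by (const_int 16) becomes
             (lshiftrt x (const_int 4)); exact_log2 must be positive here,
             since division by 1 was already handled above.  */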
1966 break;
1968 case DIV:
1969 /* Handle floating point and integers separately. */
1970 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1972 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1973 safe for modes with NaNs, since 0.0 / 0.0 will then be
1974 NaN rather than 0.0. Nor is it safe for modes with signed
1975 zeros, since dividing 0 by a negative number gives -0.0. */
1976 if (trueop0 == CONST0_RTX (mode)
1977 && !HONOR_NANS (mode)
1978 && !HONOR_SIGNED_ZEROS (mode)
1979 && ! side_effects_p (op1))
1980 return op0;
1981 /* x/1.0 is x. */
1982 if (trueop1 == CONST1_RTX (mode)
1983 && !HONOR_SNANS (mode))
1984 return op0;
1986 if (GET_CODE (trueop1) == CONST_DOUBLE
1987 && trueop1 != CONST0_RTX (mode))
1989 REAL_VALUE_TYPE d;
1990 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1992 /* x/-1.0 is -x. */
1993 if (REAL_VALUES_EQUAL (d, dconstm1)
1994 && !HONOR_SNANS (mode))
1995 return simplify_gen_unary (NEG, mode, op0, mode);
1997 /* Change FP division by a constant into multiplication.
1998 Only do this with -funsafe-math-optimizations. */
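/* e.g. (div X 2.0) then becomes (mult X 0.5).  The flag is needed
   because the reciprocal of a constant such as 3.0 is not exactly
   representable, so the multiply can round differently than the
   division would.  */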
1999 if (flag_unsafe_math_optimizations
2000 && !REAL_VALUES_EQUAL (d, dconst0))
2002 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2003 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2004 return simplify_gen_binary (MULT, mode, op0, tem);
2008 else
2010 /* 0/x is 0 (or x&0 if x has side-effects). */
2011 if (trueop0 == const0_rtx)
2012 return side_effects_p (op1)
2013 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2014 : const0_rtx;
2015 /* x/1 is x. */
2016 if (trueop1 == const1_rtx)
2018 /* Handle narrowing DIV. */
2019 rtx x = gen_lowpart_common (mode, op0);
2020 if (x)
2021 return x;
2022 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2023 return gen_lowpart_SUBREG (mode, op0);
2024 return op0;
2026 /* x/-1 is -x. */
2027 if (trueop1 == constm1_rtx)
2029 rtx x = gen_lowpart_common (mode, op0);
2030 if (!x)
2031 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
2032 ? gen_lowpart_SUBREG (mode, op0) : op0;
2033 return simplify_gen_unary (NEG, mode, x, mode);
2036 break;
2038 case UMOD:
2039 /* 0%x is 0 (or x&0 if x has side-effects). */
2040 if (trueop0 == const0_rtx)
2041 return side_effects_p (op1)
2042 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2043 : const0_rtx;
2044 /* x%1 is 0 (or x&0 if x has side-effects). */
2045 if (trueop1 == const1_rtx)
2046 return side_effects_p (op0)
2047 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2048 : const0_rtx;
2049 /* Implement modulus by power of two as AND. */
2050 if (GET_CODE (trueop1) == CONST_INT
2051 && exact_log2 (INTVAL (trueop1)) > 0)
2052 return simplify_gen_binary (AND, mode, op0,
2053 GEN_INT (INTVAL (op1) - 1));
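/* e.g. (umod X (const_int 8)) becomes (and X (const_int 7)):
   for unsigned values the remainder modulo a power of two is
   exactly the low bits.  */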
2054 break;
2056 case MOD:
2057 /* 0%x is 0 (or x&0 if x has side-effects). */
2058 if (trueop0 == const0_rtx)
2059 return side_effects_p (op1)
2060 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2061 : const0_rtx;
2062 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2063 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2064 return side_effects_p (op0)
2065 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2066 : const0_rtx;
2067 break;
2069 case ROTATERT:
2070 case ROTATE:
2071 case ASHIFTRT:
2072 /* Rotating ~0 always results in ~0. */
2073 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2074 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2075 && ! side_effects_p (op1))
2076 return op0;
2078 /* Fall through.... */
2080 case ASHIFT:
2081 case LSHIFTRT:
2082 if (trueop1 == const0_rtx)
2083 return op0;
2084 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2085 return op0;
2086 break;
2088 case SMIN:
2089 if (width <= HOST_BITS_PER_WIDE_INT
2090 && GET_CODE (trueop1) == CONST_INT
2091 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2092 && ! side_effects_p (op0))
2093 return op1;
2094 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2095 return op0;
2096 tem = simplify_associative_operation (code, mode, op0, op1);
2097 if (tem)
2098 return tem;
2099 break;
2101 case SMAX:
2102 if (width <= HOST_BITS_PER_WIDE_INT
2103 && GET_CODE (trueop1) == CONST_INT
2104 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2105 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2106 && ! side_effects_p (op0))
2107 return op1;
2108 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2109 return op0;
2110 tem = simplify_associative_operation (code, mode, op0, op1);
2111 if (tem)
2112 return tem;
2113 break;
2115 case UMIN:
2116 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2117 return op1;
2118 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2119 return op0;
2120 tem = simplify_associative_operation (code, mode, op0, op1);
2121 if (tem)
2122 return tem;
2123 break;
2125 case UMAX:
2126 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2127 return op1;
2128 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2129 return op0;
2130 tem = simplify_associative_operation (code, mode, op0, op1);
2131 if (tem)
2132 return tem;
2133 break;
2135 case SS_PLUS:
2136 case US_PLUS:
2137 case SS_MINUS:
2138 case US_MINUS:
2139 /* ??? There are simplifications that can be done. */
2140 return 0;
2142 case VEC_SELECT:
2143 if (!VECTOR_MODE_P (mode))
2145 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2146 || (mode
2147 != GET_MODE_INNER (GET_MODE (trueop0)))
2148 || GET_CODE (trueop1) != PARALLEL
2149 || XVECLEN (trueop1, 0) != 1
2150 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
2151 abort ();
2153 if (GET_CODE (trueop0) == CONST_VECTOR)
2154 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2156 else
2158 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2159 || (GET_MODE_INNER (mode)
2160 != GET_MODE_INNER (GET_MODE (trueop0)))
2161 || GET_CODE (trueop1) != PARALLEL)
2162 abort ();
2164 if (GET_CODE (trueop0) == CONST_VECTOR)
2166 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2167 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2168 rtvec v = rtvec_alloc (n_elts);
2169 unsigned int i;
2171 if (XVECLEN (trueop1, 0) != (int) n_elts)
2172 abort ();
2173 for (i = 0; i < n_elts; i++)
2175 rtx x = XVECEXP (trueop1, 0, i);
2177 if (GET_CODE (x) != CONST_INT)
2178 abort ();
2179 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2182 return gen_rtx_CONST_VECTOR (mode, v);
2185 return 0;
2186 case VEC_CONCAT:
2188 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2189 ? GET_MODE (trueop0)
2190 : GET_MODE_INNER (mode));
2191 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2192 ? GET_MODE (trueop1)
2193 : GET_MODE_INNER (mode));
2195 if (!VECTOR_MODE_P (mode)
2196 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2197 != GET_MODE_SIZE (mode)))
2198 abort ();
2200 if ((VECTOR_MODE_P (op0_mode)
2201 && (GET_MODE_INNER (mode)
2202 != GET_MODE_INNER (op0_mode)))
2203 || (!VECTOR_MODE_P (op0_mode)
2204 && GET_MODE_INNER (mode) != op0_mode))
2205 abort ();
2207 if ((VECTOR_MODE_P (op1_mode)
2208 && (GET_MODE_INNER (mode)
2209 != GET_MODE_INNER (op1_mode)))
2210 || (!VECTOR_MODE_P (op1_mode)
2211 && GET_MODE_INNER (mode) != op1_mode))
2212 abort ();
2214 if ((GET_CODE (trueop0) == CONST_VECTOR
2215 || GET_CODE (trueop0) == CONST_INT
2216 || GET_CODE (trueop0) == CONST_DOUBLE)
2217 && (GET_CODE (trueop1) == CONST_VECTOR
2218 || GET_CODE (trueop1) == CONST_INT
2219 || GET_CODE (trueop1) == CONST_DOUBLE))
2221 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2222 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2223 rtvec v = rtvec_alloc (n_elts);
2224 unsigned int i;
2225 unsigned in_n_elts = 1;
2227 if (VECTOR_MODE_P (op0_mode))
2228 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2229 for (i = 0; i < n_elts; i++)
2231 if (i < in_n_elts)
2233 if (!VECTOR_MODE_P (op0_mode))
2234 RTVEC_ELT (v, i) = trueop0;
2235 else
2236 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2238 else
2240 if (!VECTOR_MODE_P (op1_mode))
2241 RTVEC_ELT (v, i) = trueop1;
2242 else
2243 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2244 i - in_n_elts);
2248 return gen_rtx_CONST_VECTOR (mode, v);
2251 return 0;
2253 default:
2254 abort ();
2257 return 0;
2260 /* Get the integer argument values in two forms:
2261 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2263 arg0 = INTVAL (trueop0);
2264 arg1 = INTVAL (trueop1);
2266 if (width < HOST_BITS_PER_WIDE_INT)
2268 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2269 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2271 arg0s = arg0;
2272 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2273 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2275 arg1s = arg1;
2276 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2277 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2279 else
2281 arg0s = arg0;
2282 arg1s = arg1;
2285 /* Compute the value of the arithmetic. */
2287 switch (code)
2289 case PLUS:
2290 val = arg0s + arg1s;
2291 break;
2293 case MINUS:
2294 val = arg0s - arg1s;
2295 break;
2297 case MULT:
2298 val = arg0s * arg1s;
2299 break;
2301 case DIV:
2302 if (arg1s == 0
2303 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2304 && arg1s == -1))
2305 return 0;
2306 val = arg0s / arg1s;
2307 break;
2309 case MOD:
2310 if (arg1s == 0
2311 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2312 && arg1s == -1))
2313 return 0;
2314 val = arg0s % arg1s;
2315 break;
2317 case UDIV:
2318 if (arg1 == 0
2319 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2320 && arg1s == -1))
2321 return 0;
2322 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2323 break;
2325 case UMOD:
2326 if (arg1 == 0
2327 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2328 && arg1s == -1))
2329 return 0;
2330 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2331 break;
2333 case AND:
2334 val = arg0 & arg1;
2335 break;
2337 case IOR:
2338 val = arg0 | arg1;
2339 break;
2341 case XOR:
2342 val = arg0 ^ arg1;
2343 break;
2345 case LSHIFTRT:
2346 case ASHIFT:
2347 case ASHIFTRT:
2348 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2349 value is in range. We can't return any old value for out-of-range
2350 arguments because either the middle-end (via shift_truncation_mask)
2351 or the back-end might be relying on target-specific knowledge.
2352 Nor can we rely on shift_truncation_mask, since the shift might
2353 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2354 if (SHIFT_COUNT_TRUNCATED)
2355 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2356 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2357 return 0;
2359 val = (code == ASHIFT
2360 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2361 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2363 /* Sign-extend the result for arithmetic right shifts. */
2364 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2365 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
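/* e.g. with width == 32, (ashiftrt (const_int -8) (const_int 1))
   shifts the zero-extended value 0xfffffff8 logically to
   0x7ffffffc, and the OR just above restores the sign bits,
   yielding -4.  */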
2366 break;
2368 case ROTATERT:
2369 if (arg1 < 0)
2370 return 0;
2372 arg1 %= width;
2373 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2374 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2375 break;
2377 case ROTATE:
2378 if (arg1 < 0)
2379 return 0;
2381 arg1 %= width;
2382 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2383 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
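/* e.g. in QImode, (rotate (const_int 0x0b) (const_int 4)) computes
   (0x0b << 4) | (0x0b >> 4) == 0xb0.  */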
2384 break;
2386 case COMPARE:
2387 /* Do nothing here. */
2388 return 0;
2390 case SMIN:
2391 val = arg0s <= arg1s ? arg0s : arg1s;
2392 break;
2394 case UMIN:
2395 val = ((unsigned HOST_WIDE_INT) arg0
2396 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2397 break;
2399 case SMAX:
2400 val = arg0s > arg1s ? arg0s : arg1s;
2401 break;
2403 case UMAX:
2404 val = ((unsigned HOST_WIDE_INT) arg0
2405 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2406 break;
2408 case SS_PLUS:
2409 case US_PLUS:
2410 case SS_MINUS:
2411 case US_MINUS:
2412 /* ??? There are simplifications that can be done. */
2413 return 0;
2415 default:
2416 abort ();
2419 val = trunc_int_for_mode (val, mode);
2421 return GEN_INT (val);
2424 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2425 PLUS or MINUS.
2427 Rather than test for specific cases, we do this by a brute-force method
2428 and do all possible simplifications until no more changes occur. Then
2429 we rebuild the operation.
2431 If FORCE is true, then always generate the rtx. This is used to
2432 canonicalize stuff emitted from simplify_gen_binary. Note that this
2433 can still fail if the rtx is too complex. It won't fail just because
2434 the result is not 'simpler' than the input, however. */
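/* For example, (plus (minus A B) (plus B C)) is flattened into the
   operand list {A, -B, B, C}; the pairwise pass below cancels -B
   against B, and the remaining operands are rebuilt as (plus A C).  */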
2436 struct simplify_plus_minus_op_data
2438 rtx op;
2439 int neg;
2442 static int
2443 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2445 const struct simplify_plus_minus_op_data *d1 = p1;
2446 const struct simplify_plus_minus_op_data *d2 = p2;
2448 return (commutative_operand_precedence (d2->op)
2449 - commutative_operand_precedence (d1->op));
2452 static rtx
2453 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2454 rtx op1, int force)
2456 struct simplify_plus_minus_op_data ops[8];
2457 rtx result, tem;
2458 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2459 int first, changed;
2460 int i, j;
2462 memset (ops, 0, sizeof ops);
2464 /* Set up the two operands and then expand them until nothing has been
2465 changed. If we run out of room in our array, give up; this should
2466 almost never happen. */
2468 ops[0].op = op0;
2469 ops[0].neg = 0;
2470 ops[1].op = op1;
2471 ops[1].neg = (code == MINUS);
2475 changed = 0;
2477 for (i = 0; i < n_ops; i++)
2479 rtx this_op = ops[i].op;
2480 int this_neg = ops[i].neg;
2481 enum rtx_code this_code = GET_CODE (this_op);
2483 switch (this_code)
2485 case PLUS:
2486 case MINUS:
2487 if (n_ops == 7)
2488 return NULL_RTX;
2490 ops[n_ops].op = XEXP (this_op, 1);
2491 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2492 n_ops++;
2494 ops[i].op = XEXP (this_op, 0);
2495 input_ops++;
2496 changed = 1;
2497 break;
2499 case NEG:
2500 ops[i].op = XEXP (this_op, 0);
2501 ops[i].neg = ! this_neg;
2502 changed = 1;
2503 break;
2505 case CONST:
2506 if (n_ops < 7
2507 && GET_CODE (XEXP (this_op, 0)) == PLUS
2508 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2509 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2511 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2512 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2513 ops[n_ops].neg = this_neg;
2514 n_ops++;
2515 input_consts++;
2516 changed = 1;
2518 break;
2520 case NOT:
2521 /* ~a -> (-a - 1) */
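/* (Two's complement identity: ~A == -A - 1, so the operand is
   negated and a -1 term is appended to the list.)  */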
2522 if (n_ops != 7)
2524 ops[n_ops].op = constm1_rtx;
2525 ops[n_ops++].neg = this_neg;
2526 ops[i].op = XEXP (this_op, 0);
2527 ops[i].neg = !this_neg;
2528 changed = 1;
2530 break;
2532 case CONST_INT:
2533 if (this_neg)
2535 ops[i].op = neg_const_int (mode, this_op);
2536 ops[i].neg = 0;
2537 changed = 1;
2539 break;
2541 default:
2542 break;
2546 while (changed);
2548 /* If we only have two operands, we can't do anything. */
2549 if (n_ops <= 2 && !force)
2550 return NULL_RTX;
2552 /* Count the number of CONSTs we didn't split above. */
2553 for (i = 0; i < n_ops; i++)
2554 if (GET_CODE (ops[i].op) == CONST)
2555 input_consts++;
2557 /* Now simplify each pair of operands until nothing changes. The first
2558 time through just simplify constants against each other. */
2560 first = 1;
2563 changed = first;
2565 for (i = 0; i < n_ops - 1; i++)
2566 for (j = i + 1; j < n_ops; j++)
2568 rtx lhs = ops[i].op, rhs = ops[j].op;
2569 int lneg = ops[i].neg, rneg = ops[j].neg;
2571 if (lhs != 0 && rhs != 0
2572 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2574 enum rtx_code ncode = PLUS;
2576 if (lneg != rneg)
2578 ncode = MINUS;
2579 if (lneg)
2580 tem = lhs, lhs = rhs, rhs = tem;
2582 else if (swap_commutative_operands_p (lhs, rhs))
2583 tem = lhs, lhs = rhs, rhs = tem;
2585 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2587 /* Reject "simplifications" that just wrap the two
2588 arguments in a CONST. Failure to do so can result
2589 in infinite recursion with simplify_binary_operation
2590 when it calls us to simplify CONST operations. */
2591 if (tem
2592 && ! (GET_CODE (tem) == CONST
2593 && GET_CODE (XEXP (tem, 0)) == ncode
2594 && XEXP (XEXP (tem, 0), 0) == lhs
2595 && XEXP (XEXP (tem, 0), 1) == rhs)
2596 /* Don't allow -x + -1 -> ~x simplifications in the
2597 first pass. This allows us the chance to combine
2598 the -1 with other constants. */
2599 && ! (first
2600 && GET_CODE (tem) == NOT
2601 && XEXP (tem, 0) == rhs))
2603 lneg &= rneg;
2604 if (GET_CODE (tem) == NEG)
2605 tem = XEXP (tem, 0), lneg = !lneg;
2606 if (GET_CODE (tem) == CONST_INT && lneg)
2607 tem = neg_const_int (mode, tem), lneg = 0;
2609 ops[i].op = tem;
2610 ops[i].neg = lneg;
2611 ops[j].op = NULL_RTX;
2612 changed = 1;
2617 first = 0;
2619 while (changed);
2621 /* Pack all the operands to the lower-numbered entries. */
2622 for (i = 0, j = 0; j < n_ops; j++)
2623 if (ops[j].op)
2624 ops[i++] = ops[j];
2625 n_ops = i;
2627 /* Sort the operations based on swap_commutative_operands_p. */
2628 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2630 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2631 if (n_ops == 2
2632 && GET_CODE (ops[1].op) == CONST_INT
2633 && CONSTANT_P (ops[0].op)
2634 && ops[0].neg)
2635 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2637 /* We suppressed creation of trivial CONST expressions in the
2638 combination loop to avoid recursion. Create one manually now.
2639 The combination loop should have ensured that there is exactly
2640 one CONST_INT, and the sort will have ensured that it is last
2641 in the array and that any other constant will be next-to-last. */
2643 if (n_ops > 1
2644 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2645 && CONSTANT_P (ops[n_ops - 2].op))
2647 rtx value = ops[n_ops - 1].op;
2648 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2649 value = neg_const_int (mode, value);
2650 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2651 n_ops--;
2654 /* Count the number of CONSTs that we generated. */
2655 n_consts = 0;
2656 for (i = 0; i < n_ops; i++)
2657 if (GET_CODE (ops[i].op) == CONST)
2658 n_consts++;
2660 /* Give up if we didn't reduce the number of operands we had. Make
2661 sure we count a CONST as two operands. If we have the same
2662 number of operands, but have made more CONSTs than before, this
2663 is also an improvement, so accept it. */
2664 if (!force
2665 && (n_ops + n_consts > input_ops
2666 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2667 return NULL_RTX;
2669 /* Put a non-negated operand first, if possible. */
2671 for (i = 0; i < n_ops && ops[i].neg; i++)
2672 continue;
2673 if (i == n_ops)
2674 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2675 else if (i != 0)
2677 tem = ops[0].op;
2678 ops[0] = ops[i];
2679 ops[i].op = tem;
2680 ops[i].neg = 1;
2683 /* Now make the result by performing the requested operations. */
2684 result = ops[0].op;
2685 for (i = 1; i < n_ops; i++)
2686 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2687 mode, result, ops[i].op);
2689 return result;
2692 /* Like simplify_binary_operation except used for relational operators.
2693 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2694 also be VOIDmode.
2696 CMP_MODE specifies the mode in which the comparison is done, so it is
2697 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2698 the operands or, if both are VOIDmode, the operands are compared in
2699 "infinite precision". */
2701 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2702 enum machine_mode cmp_mode, rtx op0, rtx op1)
2704 rtx tem, trueop0, trueop1;
2706 if (cmp_mode == VOIDmode)
2707 cmp_mode = GET_MODE (op0);
2708 if (cmp_mode == VOIDmode)
2709 cmp_mode = GET_MODE (op1);
2711 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2712 if (tem)
2714 #ifdef FLOAT_STORE_FLAG_VALUE
2715 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2717 if (tem == const0_rtx)
2718 return CONST0_RTX (mode);
2719 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2721 REAL_VALUE_TYPE val;
2722 val = FLOAT_STORE_FLAG_VALUE (mode);
2723 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2726 #endif
2728 return tem;
2731 /* For the following tests, ensure const0_rtx is op1. */
2732 if (swap_commutative_operands_p (op0, op1)
2733 || (op0 == const0_rtx && op1 != const0_rtx))
2734 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2736 /* If op0 is a compare, extract the comparison arguments from it. */
2737 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2738 return simplify_relational_operation (code, mode, VOIDmode,
2739 XEXP (op0, 0), XEXP (op0, 1));
2741 if (mode == VOIDmode
2742 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2743 || CC0_P (op0))
2744 return NULL_RTX;
2746 trueop0 = avoid_constant_pool_reference (op0);
2747 trueop1 = avoid_constant_pool_reference (op1);
2748 return simplify_relational_operation_1 (code, mode, cmp_mode,
2749 trueop0, trueop1);
2752 /* This part of simplify_relational_operation is only used when CMP_MODE
2753 is not in class MODE_CC (i.e. it is a real comparison).
2755 MODE is the mode of the result, while CMP_MODE specifies in which
2756 mode the comparison is done, so it is the mode of the operands. */
2758 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2759 enum machine_mode cmp_mode, rtx op0, rtx op1)
2761 if (GET_CODE (op1) == CONST_INT)
2763 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2765 /* If op0 is a comparison, extract the comparison arguments from it. */
2766 if (code == NE)
2768 if (GET_MODE (op0) == cmp_mode)
2769 return simplify_rtx (op0);
2770 else
2771 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2772 XEXP (op0, 0), XEXP (op0, 1));
2774 else if (code == EQ)
2776 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2777 if (new_code != UNKNOWN)
2778 return simplify_gen_relational (new_code, mode, VOIDmode,
2779 XEXP (op0, 0), XEXP (op0, 1));
2784 return NULL_RTX;
2787 /* Check if the given comparison (done in the given MODE) is actually a
2788 tautology or a contradiction.
2789 If no simplification is possible, this function returns zero.
2790 Otherwise, it returns either const_true_rtx or const0_rtx. */
2793 simplify_const_relational_operation (enum rtx_code code,
2794 enum machine_mode mode,
2795 rtx op0, rtx op1)
2797 int equal, op0lt, op0ltu, op1lt, op1ltu;
2798 rtx tem;
2799 rtx trueop0;
2800 rtx trueop1;
2802 if (mode == VOIDmode
2803 && (GET_MODE (op0) != VOIDmode
2804 || GET_MODE (op1) != VOIDmode))
2805 abort ();
2807 /* If op0 is a compare, extract the comparison arguments from it. */
2808 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2809 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2811 /* We can't simplify MODE_CC values since we don't know what the
2812 actual comparison is. */
2813 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2814 return 0;
2816 /* Make sure the constant is second. */
2817 if (swap_commutative_operands_p (op0, op1))
2819 tem = op0, op0 = op1, op1 = tem;
2820 code = swap_condition (code);
2823 trueop0 = avoid_constant_pool_reference (op0);
2824 trueop1 = avoid_constant_pool_reference (op1);
2826 /* For integer comparisons of A and B, we may be able to simplify A - B
2827 and then simplify a comparison of that with zero. If A and B are both either
2828 a register or a CONST_INT, this can't help; testing for these cases will
2829 prevent infinite recursion here and speed things up.
2831 If CODE is an unsigned comparison, then we can never do this optimization,
2832 because it gives an incorrect result if the subtraction wraps around zero.
2833 ANSI C defines unsigned operations such that they never overflow, and
2834 thus such cases cannot be ignored; but we cannot do it even for
2835 signed comparisons for languages such as Java, so test flag_wrapv. */
2837 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2838 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2839 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2840 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2841 /* We cannot do this for == or != if tem is a nonzero address. */
2842 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2843 && code != GTU && code != GEU && code != LTU && code != LEU)
2844 return simplify_const_relational_operation (signed_condition (code),
2845 mode, tem, const0_rtx);
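/* e.g. comparing (plus A 1) with A: the MINUS folds to
   (const_int 1) and the recursive call decides
   (gt (const_int 1) (const_int 0)), which is true.  The unsigned
   guard above matters: for GTU this would be wrong when A + 1
   wraps around to 0.  */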
2847 if (flag_unsafe_math_optimizations && code == ORDERED)
2848 return const_true_rtx;
2850 if (flag_unsafe_math_optimizations && code == UNORDERED)
2851 return const0_rtx;
2853 /* For modes without NaNs, if the two operands are equal, we know the
2854 result except if they have side-effects. */
2855 if (! HONOR_NANS (GET_MODE (trueop0))
2856 && rtx_equal_p (trueop0, trueop1)
2857 && ! side_effects_p (trueop0))
2858 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2860 /* If the operands are floating-point constants, see if we can fold
2861 the result. */
2862 else if (GET_CODE (trueop0) == CONST_DOUBLE
2863 && GET_CODE (trueop1) == CONST_DOUBLE
2864 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2866 REAL_VALUE_TYPE d0, d1;
2868 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2869 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2871 /* Comparisons are unordered iff at least one of the values is NaN. */
2872 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2873 switch (code)
2875 case UNEQ:
2876 case UNLT:
2877 case UNGT:
2878 case UNLE:
2879 case UNGE:
2880 case NE:
2881 case UNORDERED:
2882 return const_true_rtx;
2883 case EQ:
2884 case LT:
2885 case GT:
2886 case LE:
2887 case GE:
2888 case LTGT:
2889 case ORDERED:
2890 return const0_rtx;
2891 default:
2892 return 0;
2895 equal = REAL_VALUES_EQUAL (d0, d1);
2896 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2897 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2900 /* Otherwise, see if the operands are both integers. */
2901 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2902 && (GET_CODE (trueop0) == CONST_DOUBLE
2903 || GET_CODE (trueop0) == CONST_INT)
2904 && (GET_CODE (trueop1) == CONST_DOUBLE
2905 || GET_CODE (trueop1) == CONST_INT))
2907 int width = GET_MODE_BITSIZE (mode);
2908 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2909 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2911 /* Get the two words comprising each integer constant. */
2912 if (GET_CODE (trueop0) == CONST_DOUBLE)
2914 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2915 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2917 else
2919 l0u = l0s = INTVAL (trueop0);
2920 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2923 if (GET_CODE (trueop1) == CONST_DOUBLE)
2925 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2926 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2928 else
2930 l1u = l1s = INTVAL (trueop1);
2931 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2934 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2935 we have to sign or zero-extend the values. */
2936 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2938 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2939 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2941 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2942 l0s |= ((HOST_WIDE_INT) (-1) << width);
2944 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2945 l1s |= ((HOST_WIDE_INT) (-1) << width);
2947 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2948 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2950 equal = (h0u == h1u && l0u == l1u);
2951 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2952 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2953 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2954 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
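/* The (high, low) pairs compare lexicographically: the high words
   decide the order (signed for op0lt/op1lt, unsigned for
   op0ltu/op1ltu) and ties fall through to the low words, which
   are always compared unsigned.  */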
2957 /* Otherwise, there are some code-specific tests we can make. */
2958 else
2960 /* Optimize comparisons with upper and lower bounds. */
2961 if (SCALAR_INT_MODE_P (mode)
2962 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2964 rtx mmin, mmax;
2965 int sign;
2967 if (code == GEU
2968 || code == LEU
2969 || code == GTU
2970 || code == LTU)
2971 sign = 0;
2972 else
2973 sign = 1;
2975 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
2977 tem = NULL_RTX;
2978 switch (code)
2980 case GEU:
2981 case GE:
2982 /* x >= min is always true. */
2983 if (rtx_equal_p (trueop1, mmin))
2984 tem = const_true_rtx;
2985 else
2986 break;
2988 case LEU:
2989 case LE:
2990 /* x <= max is always true. */
2991 if (rtx_equal_p (trueop1, mmax))
2992 tem = const_true_rtx;
2993 break;
2995 case GTU:
2996 case GT:
2997 /* x > max is always false. */
2998 if (rtx_equal_p (trueop1, mmax))
2999 tem = const0_rtx;
3000 break;
3002 case LTU:
3003 case LT:
3004 /* x < min is always false. */
3005 if (rtx_equal_p (trueop1, mmin))
3006 tem = const0_rtx;
3007 break;
3009 default:
3010 break;
3012 if (tem == const0_rtx
3013 || tem == const_true_rtx)
3014 return tem;
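/* e.g. for QImode with an unsigned comparison, (geu X (const_int 0))
   is always true since mmin is 0, and (gtu X (const_int 255)) is
   always false since mmax is 255.  */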
3017 switch (code)
3019 case EQ:
3020 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3021 return const0_rtx;
3022 break;
3024 case NE:
3025 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3026 return const_true_rtx;
3027 break;
3029 case LT:
3030 /* Optimize abs(x) < 0.0. */
3031 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3033 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3034 : trueop0;
3035 if (GET_CODE (tem) == ABS)
3036 return const0_rtx;
3038 break;
3040 case GE:
3041 /* Optimize abs(x) >= 0.0. */
3042 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3044 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3045 : trueop0;
3046 if (GET_CODE (tem) == ABS)
3047 return const_true_rtx;
3049 break;
3051 case UNGE:
3052 /* Optimize ! (abs(x) < 0.0). */
3053 if (trueop1 == CONST0_RTX (mode))
3055 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3056 : trueop0;
3057 if (GET_CODE (tem) == ABS)
3058 return const_true_rtx;
3060 break;
3062 default:
3063 break;
3066 return 0;
3069 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3070 as appropriate. */
3071 switch (code)
3073 case EQ:
3074 case UNEQ:
3075 return equal ? const_true_rtx : const0_rtx;
3076 case NE:
3077 case LTGT:
3078 return ! equal ? const_true_rtx : const0_rtx;
3079 case LT:
3080 case UNLT:
3081 return op0lt ? const_true_rtx : const0_rtx;
3082 case GT:
3083 case UNGT:
3084 return op1lt ? const_true_rtx : const0_rtx;
3085 case LTU:
3086 return op0ltu ? const_true_rtx : const0_rtx;
3087 case GTU:
3088 return op1ltu ? const_true_rtx : const0_rtx;
3089 case LE:
3090 case UNLE:
3091 return equal || op0lt ? const_true_rtx : const0_rtx;
3092 case GE:
3093 case UNGE:
3094 return equal || op1lt ? const_true_rtx : const0_rtx;
3095 case LEU:
3096 return equal || op0ltu ? const_true_rtx : const0_rtx;
3097 case GEU:
3098 return equal || op1ltu ? const_true_rtx : const0_rtx;
3099 case ORDERED:
3100 return const_true_rtx;
3101 case UNORDERED:
3102 return const0_rtx;
3103 default:
3104 abort ();
3108 /* Simplify CODE, an operation with result mode MODE and three operands,
3109 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3110 a constant. Return 0 if no simplification is possible. */
3113 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3114 enum machine_mode op0_mode, rtx op0, rtx op1,
3115 rtx op2)
3117 unsigned int width = GET_MODE_BITSIZE (mode);
3119 /* VOIDmode means "infinite" precision. */
3120 if (width == 0)
3121 width = HOST_BITS_PER_WIDE_INT;
3123 switch (code)
3125 case SIGN_EXTRACT:
3126 case ZERO_EXTRACT:
3127 if (GET_CODE (op0) == CONST_INT
3128 && GET_CODE (op1) == CONST_INT
3129 && GET_CODE (op2) == CONST_INT
3130 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3131 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3133 /* Extracting a bit-field from a constant */
3134 HOST_WIDE_INT val = INTVAL (op0);
3136 if (BITS_BIG_ENDIAN)
3137 val >>= (GET_MODE_BITSIZE (op0_mode)
3138 - INTVAL (op2) - INTVAL (op1));
3139 else
3140 val >>= INTVAL (op2);
3142 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3144 /* First zero-extend. */
3145 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3146 /* If desired, propagate sign bit. */
3147 if (code == SIGN_EXTRACT
3148 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3149 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3152 /* Clear the bits that don't belong in our mode,
3153 unless they and our sign bit are all one.
3154 So we get either a reasonable negative value or a reasonable
3155 unsigned value for this mode. */
3156 if (width < HOST_BITS_PER_WIDE_INT
3157 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3158 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3159 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3161 return GEN_INT (val);
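/* e.g. with BITS_BIG_ENDIAN == 0, extracting the 4-bit field at
   position 4 from (const_int 0xf0) yields 0xf for ZERO_EXTRACT
   and -1 for SIGN_EXTRACT (the field is all ones, so the sign
   bit propagates).  */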
3163 break;
3165 case IF_THEN_ELSE:
3166 if (GET_CODE (op0) == CONST_INT)
3167 return op0 != const0_rtx ? op1 : op2;
3169 /* Convert c ? a : a into "a". */
3170 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3171 return op1;
3173 /* Convert a != b ? a : b into "a". */
3174 if (GET_CODE (op0) == NE
3175 && ! side_effects_p (op0)
3176 && ! HONOR_NANS (mode)
3177 && ! HONOR_SIGNED_ZEROS (mode)
3178 && ((rtx_equal_p (XEXP (op0, 0), op1)
3179 && rtx_equal_p (XEXP (op0, 1), op2))
3180 || (rtx_equal_p (XEXP (op0, 0), op2)
3181 && rtx_equal_p (XEXP (op0, 1), op1))))
3182 return op1;
3184 /* Convert a == b ? a : b into "b". */
3185 if (GET_CODE (op0) == EQ
3186 && ! side_effects_p (op0)
3187 && ! HONOR_NANS (mode)
3188 && ! HONOR_SIGNED_ZEROS (mode)
3189 && ((rtx_equal_p (XEXP (op0, 0), op1)
3190 && rtx_equal_p (XEXP (op0, 1), op2))
3191 || (rtx_equal_p (XEXP (op0, 0), op2)
3192 && rtx_equal_p (XEXP (op0, 1), op1))))
3193 return op2;
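/* The signed-zero guard above matters: with A == -0.0 and
   B == +0.0 the EQ comparison is true, so the IF_THEN_ELSE
   selects -0.0, while folding to B would yield +0.0.  */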
3195 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3197 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3198 ? GET_MODE (XEXP (op0, 1))
3199 : GET_MODE (XEXP (op0, 0)));
3200 rtx temp;
3202 /* Look for happy constants in op1 and op2. */
3203 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3205 HOST_WIDE_INT t = INTVAL (op1);
3206 HOST_WIDE_INT f = INTVAL (op2);
3208 if (t == STORE_FLAG_VALUE && f == 0)
3209 code = GET_CODE (op0);
3210 else if (t == 0 && f == STORE_FLAG_VALUE)
3212 enum rtx_code tmp;
3213 tmp = reversed_comparison_code (op0, NULL_RTX);
3214 if (tmp == UNKNOWN)
3215 break;
3216 code = tmp;
3218 else
3219 break;
3221 return simplify_gen_relational (code, mode, cmp_mode,
3222 XEXP (op0, 0), XEXP (op0, 1));
3225 if (cmp_mode == VOIDmode)
3226 cmp_mode = op0_mode;
3227 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3228 cmp_mode, XEXP (op0, 0),
3229 XEXP (op0, 1));
3231 /* See if any simplifications were possible. */
3232 if (temp)
3234 if (GET_CODE (temp) == CONST_INT)
3235 return temp == const0_rtx ? op2 : op1;
3236 else if (temp)
3237 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3240 break;
3242 case VEC_MERGE:
3243 if (GET_MODE (op0) != mode
3244 || GET_MODE (op1) != mode
3245 || !VECTOR_MODE_P (mode))
3246 abort ();
3247 op2 = avoid_constant_pool_reference (op2);
3248 if (GET_CODE (op2) == CONST_INT)
3250 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3251 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3252 int mask = (1 << n_elts) - 1;
3254 if (!(INTVAL (op2) & mask))
3255 return op1;
3256 if ((INTVAL (op2) & mask) == mask)
3257 return op0;
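/* Bit I of the mask selects element I: set takes it from OP0,
   clear from OP1 (see the CONST_VECTOR loop below), so an
   all-zero mask is OP1 and an all-ones mask is OP0.  */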
3259 op0 = avoid_constant_pool_reference (op0);
3260 op1 = avoid_constant_pool_reference (op1);
3261 if (GET_CODE (op0) == CONST_VECTOR
3262 && GET_CODE (op1) == CONST_VECTOR)
3264 rtvec v = rtvec_alloc (n_elts);
3265 unsigned int i;
3267 for (i = 0; i < n_elts; i++)
3268 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3269 ? CONST_VECTOR_ELT (op0, i)
3270 : CONST_VECTOR_ELT (op1, i));
3271 return gen_rtx_CONST_VECTOR (mode, v);
3274 break;
3276 default:
3277 abort ();
3280 return 0;
3283 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3284 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3286 Works by unpacking OP into a collection of 8-bit values
3287 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3288 and then repacking them again for OUTERMODE. */
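/* For example, simplifying (subreg:QI (const_int 0x1234) 0) with
   HImode as INNERMODE unpacks the constant into the value bytes
   {0x34, 0x12}, selects the byte at offset 0, and repacks it; on
   a little-endian target, where byte 0 is the least significant,
   the result is (const_int 0x34).  */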
3290 static rtx
3291 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3292 enum machine_mode innermode, unsigned int byte)
3294 /* We support up to 512-bit values (for V8DFmode). */
3295 enum {
3296 max_bitsize = 512,
3297 value_bit = 8,
3298 value_mask = (1 << value_bit) - 1
3300 unsigned char value[max_bitsize / value_bit];
3301 int value_start;
3302 int i;
3303 int elem;
3305 int num_elem;
3306 rtx * elems;
3307 int elem_bitsize;
3308 rtx result_s;
3309 rtvec result_v = NULL;
3310 enum mode_class outer_class;
3311 enum machine_mode outer_submode;
3313 /* Some ports misuse CCmode. */
3314 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3315 return op;
3317 /* Unpack the value. */
3319 if (GET_CODE (op) == CONST_VECTOR)
3321 num_elem = CONST_VECTOR_NUNITS (op);
3322 elems = &CONST_VECTOR_ELT (op, 0);
3323 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3325 else
3327 num_elem = 1;
3328 elems = &op;
3329 elem_bitsize = max_bitsize;
3332 if (BITS_PER_UNIT % value_bit != 0)
3333 abort (); /* Too complicated; reducing value_bit may help. */
3334 if (elem_bitsize % BITS_PER_UNIT != 0)
3335 abort (); /* I don't know how to handle endianness of sub-units. */
3337 for (elem = 0; elem < num_elem; elem++)
3339 unsigned char * vp;
3340 rtx el = elems[elem];
3342 /* Vectors are kept in target memory order. (This is probably
3343 a mistake.) */
3345 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3346 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3347 / BITS_PER_UNIT);
3348 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3349 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3350 unsigned bytele = (subword_byte % UNITS_PER_WORD
3351 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3352 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3355 switch (GET_CODE (el))
3357 case CONST_INT:
3358 for (i = 0;
3359 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3360 i += value_bit)
3361 *vp++ = INTVAL (el) >> i;
3362 /* CONST_INTs are always logically sign-extended. */
3363 for (; i < elem_bitsize; i += value_bit)
3364 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3365 break;
3367 case CONST_DOUBLE:
3368 if (GET_MODE (el) == VOIDmode)
3370 /* If this triggers, someone should have generated a
3371 CONST_INT instead. */
3372 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3373 abort ();
3375 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3376 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3377 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3379 *vp++
3380 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3381 i += value_bit;
3383 /* It shouldn't matter what's done here, so fill it with
3384 zero. */
3385 for (; i < max_bitsize; i += value_bit)
3386 *vp++ = 0;
3388 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3390 long tmp[max_bitsize / 32];
3391 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3393 if (bitsize > elem_bitsize)
3394 abort ();
3395 if (bitsize % value_bit != 0)
3396 abort ();
3398 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3399 GET_MODE (el));
3401 /* real_to_target produces its result in words affected by
3402 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3403 and use WORDS_BIG_ENDIAN instead; see the documentation
3404 of SUBREG in rtl.texi. */
3405 for (i = 0; i < bitsize; i += value_bit)
3407 int ibase;
3408 if (WORDS_BIG_ENDIAN)
3409 ibase = bitsize - 1 - i;
3410 else
3411 ibase = i;
3412 *vp++ = tmp[ibase / 32] >> i % 32;
3415 /* It shouldn't matter what's done here, so fill it with
3416 zero. */
3417 for (; i < elem_bitsize; i += value_bit)
3418 *vp++ = 0;
3420 else
3421 abort ();
3422 break;
3424 default:
3425 abort ();
3429 /* Now, pick the right byte to start with. */
3430 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3431 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3432 will already have offset 0. */
3433 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3435 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3436 - byte);
3437 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3438 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3439 byte = (subword_byte % UNITS_PER_WORD
3440 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3443 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3444 so if it's become negative it will instead be very large.) */
3445 if (byte >= GET_MODE_SIZE (innermode))
3446 abort ();
3448 /* Convert from bytes to chunks of size value_bit. */
3449 value_start = byte * (BITS_PER_UNIT / value_bit);
3451 /* Re-pack the value. */
3453 if (VECTOR_MODE_P (outermode))
3455 num_elem = GET_MODE_NUNITS (outermode);
3456 result_v = rtvec_alloc (num_elem);
3457 elems = &RTVEC_ELT (result_v, 0);
3458 outer_submode = GET_MODE_INNER (outermode);
3460 else
3462 num_elem = 1;
3463 elems = &result_s;
3464 outer_submode = outermode;
3467 outer_class = GET_MODE_CLASS (outer_submode);
3468 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3470 if (elem_bitsize % value_bit != 0)
3471 abort ();
3472 if (elem_bitsize + value_start * value_bit > max_bitsize)
3473 abort ();
3475 for (elem = 0; elem < num_elem; elem++)
3477 unsigned char *vp;
3479 /* Vectors are stored in target memory order. (This is probably
3480 a mistake.) */
3482 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3483 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3484 / BITS_PER_UNIT);
3485 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3486 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3487 unsigned bytele = (subword_byte % UNITS_PER_WORD
3488 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3489 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3492 switch (outer_class)
3494 case MODE_INT:
3495 case MODE_PARTIAL_INT:
3497 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3499 for (i = 0;
3500 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3501 i += value_bit)
3502 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3503 for (; i < elem_bitsize; i += value_bit)
3504 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3505 << (i - HOST_BITS_PER_WIDE_INT));
3507 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3508 know why. */
3509 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3510 elems[elem] = gen_int_mode (lo, outer_submode);
3511 else
3512 elems[elem] = immed_double_const (lo, hi, outer_submode);
3514 break;
3516 case MODE_FLOAT:
3518 REAL_VALUE_TYPE r;
3519 long tmp[max_bitsize / 32];
3521 /* real_from_target wants its input in words affected by
3522 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3523 and use WORDS_BIG_ENDIAN instead; see the documentation
3524 of SUBREG in rtl.texi. */
3525 for (i = 0; i < max_bitsize / 32; i++)
3526 tmp[i] = 0;
3527 for (i = 0; i < elem_bitsize; i += value_bit)
3529 int ibase;
3530 if (WORDS_BIG_ENDIAN)
3531 ibase = elem_bitsize - 1 - i;
3532 else
3533 ibase = i;
3534 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3537 real_from_target (&r, tmp, outer_submode);
3538 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3540 break;
3542 default:
3543 abort ();
3546 if (VECTOR_MODE_P (outermode))
3547 return gen_rtx_CONST_VECTOR (outermode, result_v);
3548 else
3549 return result_s;
3552 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3553 Return 0 if no simplifications are possible. */
3555 simplify_subreg (enum machine_mode outermode, rtx op,
3556 enum machine_mode innermode, unsigned int byte)
3558 /* Little bit of sanity checking. */
3559 if (innermode == VOIDmode || outermode == VOIDmode
3560 || innermode == BLKmode || outermode == BLKmode)
3561 abort ();
3563 if (GET_MODE (op) != innermode
3564 && GET_MODE (op) != VOIDmode)
3565 abort ();
3567 if (byte % GET_MODE_SIZE (outermode)
3568 || byte >= GET_MODE_SIZE (innermode))
3569 abort ();
3571 if (outermode == innermode && !byte)
3572 return op;
3574 if (GET_CODE (op) == CONST_INT
3575 || GET_CODE (op) == CONST_DOUBLE
3576 || GET_CODE (op) == CONST_VECTOR)
3577 return simplify_immed_subreg (outermode, op, innermode, byte);
3579 /* Changing mode twice with SUBREG => just change it once,
3580 or not at all if changing back to the starting mode. */
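/* e.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
   (subreg:QI (reg:SI R) 0), and collapses to (reg:SI R) itself
   when the outer mode is SImode and both offsets are zero.  */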
3581 if (GET_CODE (op) == SUBREG)
3583 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3584 int final_offset = byte + SUBREG_BYTE (op);
3585 rtx newx;
3587 if (outermode == innermostmode
3588 && byte == 0 && SUBREG_BYTE (op) == 0)
3589 return SUBREG_REG (op);
3591 /* The SUBREG_BYTE represents the offset, as if the value were stored
3592 in memory. An irritating exception is a paradoxical subreg, where
3593 we define SUBREG_BYTE to be 0; on big endian machines, this
3594 value should really be negative. For the moment, undo this exception. */
3595 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3597 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3598 if (WORDS_BIG_ENDIAN)
3599 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3600 if (BYTES_BIG_ENDIAN)
3601 final_offset += difference % UNITS_PER_WORD;
3603 if (SUBREG_BYTE (op) == 0
3604 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3606 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3607 if (WORDS_BIG_ENDIAN)
3608 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3609 if (BYTES_BIG_ENDIAN)
3610 final_offset += difference % UNITS_PER_WORD;
3613 /* See whether resulting subreg will be paradoxical. */
3614 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3616 /* In nonparadoxical subregs we can't handle negative offsets. */
3617 if (final_offset < 0)
3618 return NULL_RTX;
3619 /* Bail out in case resulting subreg would be incorrect. */
3620 if (final_offset % GET_MODE_SIZE (outermode)
3621 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3622 return NULL_RTX;
3624 else
3626 int offset = 0;
3627 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3629 /* In a paradoxical subreg, see if we are still looking at the lower part.
3630 If so, our SUBREG_BYTE will be 0. */
3631 if (WORDS_BIG_ENDIAN)
3632 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3633 if (BYTES_BIG_ENDIAN)
3634 offset += difference % UNITS_PER_WORD;
3635 if (offset == final_offset)
3636 final_offset = 0;
3637 else
3638 return NULL_RTX;
3641 /* Recurse for further possible simplifications. */
3642 newx = simplify_subreg (outermode, SUBREG_REG (op),
3643 GET_MODE (SUBREG_REG (op)),
3644 final_offset);
3645 if (newx)
3646 return newx;
3647 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3650 /* SUBREG of a hard register => just change the register number
3651 and/or mode. If the hard register is not valid in that mode,
3652 suppress this simplification. If the hard register is the stack,
3653 frame, or argument pointer, leave this as a SUBREG. */
3655 if (REG_P (op)
3656 && REGNO (op) < FIRST_PSEUDO_REGISTER
3657 #ifdef CANNOT_CHANGE_MODE_CLASS
3658 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3659 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3660 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3661 #endif
3662 && ((reload_completed && !frame_pointer_needed)
3663 || (REGNO (op) != FRAME_POINTER_REGNUM
3664 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3665 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3666 #endif
3668 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3669 && REGNO (op) != ARG_POINTER_REGNUM
3670 #endif
3671 && REGNO (op) != STACK_POINTER_REGNUM
3672 && subreg_offset_representable_p (REGNO (op), innermode,
3673 byte, outermode))
3675 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3676 int final_regno = subreg_hard_regno (tem, 0);
3678 /* ??? We do allow it if the current REG is not valid for
3679 its mode. This is a kludge to work around how float/complex
3680 arguments are passed on 32-bit SPARC and should be fixed. */
3681 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3682 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3684 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3686 /* Propagate original regno. We don't have any way to specify
3687 the offset inside original regno, so do so only for lowpart.
3688 The information is used only by alias analysis, which cannot
3689 grok a partial register anyway. */
3691 if (subreg_lowpart_offset (outermode, innermode) == byte)
3692 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3693 return x;
3697 /* If we have a SUBREG of a register that we are replacing and we are
3698 replacing it with a MEM, make a new MEM and try replacing the
3699 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3700 or if we would be widening it. */
3702 if (MEM_P (op)
3703 && ! mode_dependent_address_p (XEXP (op, 0))
3704 /* Allow splitting of volatile memory references in case we don't
3705 have instruction to move the whole thing. */
3706 && (! MEM_VOLATILE_P (op)
3707 || ! have_insn_for (SET, innermode))
3708 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3709 return adjust_address_nv (op, outermode, byte);
3711 /* Handle complex values represented as CONCAT
3712 of real and imaginary part. */
3713 if (GET_CODE (op) == CONCAT)
3715 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3716 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3717 unsigned int final_offset;
3718 rtx res;
3720 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3721 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3722 if (res)
3723 return res;
3724 /* We can at least simplify it by referring directly to the
3725 relevant part. */
3726 return gen_rtx_SUBREG (outermode, part, final_offset);
3729 /* Optimize SUBREG truncations of zero and sign extended values. */
3730 if ((GET_CODE (op) == ZERO_EXTEND
3731 || GET_CODE (op) == SIGN_EXTEND)
3732 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3734 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3736 /* If we're requesting the lowpart of a zero or sign extension,
3737 there are three possibilities. If the outermode is the same
3738 as the origmode, we can omit both the extension and the subreg.
3739 If the outermode is not larger than the origmode, we can apply
3740 the truncation without the extension. Finally, if the outermode
3741 is larger than the origmode, but both are integer modes, we
3742 can just extend to the appropriate mode. */
3743 if (bitpos == 0)
3745 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3746 if (outermode == origmode)
3747 return XEXP (op, 0);
3748 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3749 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3750 subreg_lowpart_offset (outermode,
3751 origmode));
3752 if (SCALAR_INT_MODE_P (outermode))
3753 return simplify_gen_unary (GET_CODE (op), outermode,
3754 XEXP (op, 0), origmode);
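/* e.g. (subreg:HI (zero_extend:SI (reg:HI R)) 0) is just
   (reg:HI R), while the QImode lowpart of the same extension
   becomes a QImode subreg of R directly; in both cases the
   extension is dead because only low bits are kept.  */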
3757 /* A SUBREG resulting from a zero extension may fold to zero if
3758 it extracts higher bits than the ZERO_EXTEND's source bits. */
3759 if (GET_CODE (op) == ZERO_EXTEND
3760 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3761 return CONST0_RTX (outermode);
3764 return NULL_RTX;
3767 /* Make a SUBREG operation or equivalent if it folds. */
3770 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3771 enum machine_mode innermode, unsigned int byte)
3773 rtx newx;
3774 /* Little bit of sanity checking. */
3775 if (innermode == VOIDmode || outermode == VOIDmode
3776 || innermode == BLKmode || outermode == BLKmode)
3777 abort ();
3779 if (GET_MODE (op) != innermode
3780 && GET_MODE (op) != VOIDmode)
3781 abort ();
3783 if (byte % GET_MODE_SIZE (outermode)
3784 || byte >= GET_MODE_SIZE (innermode))
3785 abort ();
3787 newx = simplify_subreg (outermode, op, innermode, byte);
3788 if (newx)
3789 return newx;
3791 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3792 return NULL_RTX;
3794 return gen_rtx_SUBREG (outermode, op, byte);
3796 /* Simplify X, an rtx expression.
3798 Return the simplified expression or NULL if no simplifications
3799 were possible.
3801 This is the preferred entry point into the simplification routines;
3802 however, we still allow passes to call the more specific routines.
3804 Right now GCC has three (yes, three) major bodies of RTL simplification
3805 code that need to be unified.
3807 1. fold_rtx in cse.c. This code uses various CSE specific
3808 information to aid in RTL simplification.
3810 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3811 it uses combine specific information to aid in RTL
3812 simplification.
3814 3. The routines in this file.
3817 Long term we want to only have one body of simplification code; to
3818 get to that state I recommend the following steps:
3820 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3821 which do not depend on pass-specific state into these routines.
3823 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3824 use this routine whenever possible.
3826 3. Allow for pass dependent state to be provided to these
3827 routines and add simplifications based on the pass dependent
3828 state. Remove code from cse.c & combine.c that becomes
3829 redundant/dead.
3831 It will take time, but ultimately the compiler will be easier to
3832 maintain and improve. It's totally silly that when we add a
3833 simplification it needs to be added to 4 places (3 for RTL
3834 simplification and 1 for tree simplification). */
3837 simplify_rtx (rtx x)
3839 enum rtx_code code = GET_CODE (x);
3840 enum machine_mode mode = GET_MODE (x);
3842 switch (GET_RTX_CLASS (code))
3844 case RTX_UNARY:
3845 return simplify_unary_operation (code, mode,
3846 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3847 case RTX_COMM_ARITH:
3848 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3849 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3851 /* Fall through.... */
3853 case RTX_BIN_ARITH:
3854 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3856 case RTX_TERNARY:
3857 case RTX_BITFIELD_OPS:
3858 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3859 XEXP (x, 0), XEXP (x, 1),
3860 XEXP (x, 2));
3862 case RTX_COMPARE:
3863 case RTX_COMM_COMPARE:
3864 return simplify_relational_operation (code, mode,
3865 ((GET_MODE (XEXP (x, 0))
3866 != VOIDmode)
3867 ? GET_MODE (XEXP (x, 0))
3868 : GET_MODE (XEXP (x, 1))),
3869 XEXP (x, 0),
3870 XEXP (x, 1));
3872 case RTX_EXTRA:
3873 if (code == SUBREG)
3874 return simplify_gen_subreg (mode, SUBREG_REG (x),
3875 GET_MODE (SUBREG_REG (x)),
3876 SUBREG_BYTE (x));
3877 break;
3879 case RTX_OBJ:
3880 if (code == LO_SUM)
3882 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3883 if (GET_CODE (XEXP (x, 0)) == HIGH
3884 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3885 return XEXP (x, 1);
3887 break;
3889 default:
3890 break;
3892 return NULL;