PR other/16240
[official-gcc.git] / gcc / simplify-rtx.c
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
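
/* Editorial example, not part of the original sources: assuming a
   32-bit HOST_WIDE_INT for brevity, HWI_SIGN_EXTEND (0x80000000)
   yields (HOST_WIDE_INT) -1 while HWI_SIGN_EXTEND (0x7fffffff) yields
   (HOST_WIDE_INT) 0, so the value -1 is represented by the
   (low, high) pair (-1, -1) and 1 by (1, 0).  */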
static rtx neg_const_int (enum machine_mode, rtx);
static bool mode_signbit_p (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

static bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
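
/* Editorial example, not part of the original file: the QImode sign
   bit is 0x80, so mode_signbit_p (QImode, GEN_INT (-128)) is true --
   INTVAL is -128, and masked to the 8-bit width it equals 0x80 --
   while mode_signbit_p (QImode, const1_rtx) is false.  */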
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
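
/* Editorial usage sketch, not part of the original file (REG here is
   any hypothetical SImode register rtx): a call such as
       simplify_gen_binary (PLUS, SImode, reg, const0_rtx)
   hands back REG itself, because the PLUS folds away in
   simplify_binary_operation, while an unfoldable operand pair yields
   a fresh (plus:SI reg op1) via gen_rtx_fmt_ee.  */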
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;
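
	  /* Worked example for the bit-twiddling cases above (editorial,
	     not in the original file): for QImode arg0 == 0x18,
	     FFS: arg0 & -arg0 == 0x08, exact_log2 gives 3, so val == 4;
	     POPCOUNT: two loop iterations clear 0x08 then 0x10, val == 2;
	     PARITY: the same count masked with 1 gives val == 0.  */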
	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (trueop));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  abort ();
	}
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (COMPARISON_P (op)
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == PLUS
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && mode_signbit_p (mode, XEXP (op, 1))
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && COMPARISON_P (op)
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);

	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
						   mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	     C is equal to the width of MODE minus 1.  */
	  if (GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_binary (LSHIFTRT, mode,
					XEXP (op, 0), XEXP (op, 1));

	  /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	     C is equal to the width of MODE minus 1.  */
	  if (GET_CODE (op) == LSHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_binary (ASHIFTRT, mode,
					XEXP (op, 0), XEXP (op, 1));

	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && REG_P (SUBREG_REG (op))
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
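
/* Editorial example, not part of the original file: constant operands
   fold outright, e.g. simplify_unary_operation (NEG, SImode,
   GEN_INT (5), SImode) returns (const_int -5) via the CONST_INT case,
   while non-constant operands may still simplify structurally, e.g.
   NEG of (neg:SI reg) hands back REG through the NEG case above.  */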
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
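
/* Editorial example, not part of the original file: with op0 ==
   (plus:SI reg (const_int 3)) and op1 == (const_int 4), the
   "(a op b) op c" -> "a op (b op c)" step folds 3 + 4 into
   (const_int 7), so the whole expression simplifies to
   (plus:SI reg (const_int 7)).  */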
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      if (code == AND)
		tmp0[i] &= tmp1[i];
	      else if (code == IOR)
		tmp0[i] |= tmp1[i];
	      else if (code == XOR)
		tmp0[i] ^= tmp1[i];
	      else
		abort ();
	    }
	  real_from_target (&r, tmp0, mode);
	  return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
	  f0 = real_value_truncate (mode, f0);
	  f1 = real_value_truncate (mode, f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

	  value = real_value_truncate (mode, value);
	  return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */
	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == XOR
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (REG_P (xop00) && REG_P (xop10)
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* (-x - c) may be simplified as (-c - x).  */
	  if (GET_CODE (op0) == NEG
	      && (GET_CODE (op1) == CONST_INT
		  || GET_CODE (op1) == CONST_DOUBLE))
	    {
	      tem = simplify_unary_operation (NEG, mode, op1, mode);
	      if (tem)
		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x  */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
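
	  /* Editorial example for the MULT case above (not in the
	     original file): once RTL generation has finished (so the
	     rtx_equal_function_value_matters kludge no longer applies),
	     (mult:SI reg (const_int 8)) simplifies to
	     (ashift:SI reg (const_int 3)), since exact_log2 (8) == 3.  */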
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;

	  /* Canonicalize XOR of the most significant bit to PLUS.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && mode_signbit_p (mode, op1))
	    return simplify_gen_binary (PLUS, mode, op0, op1);
	  /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
	  if ((GET_CODE (op1) == CONST_INT
	       || GET_CODE (op1) == CONST_DOUBLE)
	      && GET_CODE (op0) == PLUS
	      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
		  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	      && mode_signbit_p (mode, XEXP (op0, 1)))
	    return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
					simplify_gen_binary (XOR, mode, op1,
							     XEXP (op0, 1)));

	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  /* If we are turning off bits already known off in OP0, we need
	     not do an AND.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UDIV:
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == const0_rtx)
	    return side_effects_p (op1)
		   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		   : const0_rtx;
	  /* x/1 is x.  */
	  if (trueop1 == const1_rtx)
	    {
	      /* Handle narrowing UDIV.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      return op0;
	    }
	  /* Convert divide by power of two into shift.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
	  break;
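
	  /* Editorial example for the UDIV case above (not in the
	     original file): (udiv:SI reg (const_int 16)) becomes
	     (lshiftrt:SI reg (const_int 4)), since exact_log2 (16) == 4
	     and an unsigned divide by a power of two is a logical
	     right shift.  */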
	case DIV:
	  /* Handle floating point and integers separately.  */
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      /* Maybe change 0.0 / x to 0.0.  This transformation isn't
		 safe for modes with NaNs, since 0.0 / 0.0 will then be
		 NaN rather than 0.0.  Nor is it safe for modes with signed
		 zeros, since dividing 0 by a negative number gives -0.0  */
	      if (trueop0 == CONST0_RTX (mode)
		  && !HONOR_NANS (mode)
		  && !HONOR_SIGNED_ZEROS (mode)
		  && ! side_effects_p (op1))
		return op0;
	      /* x/1.0 is x.  */
	      if (trueop1 == CONST1_RTX (mode)
		  && !HONOR_SNANS (mode))
		return op0;

	      if (GET_CODE (trueop1) == CONST_DOUBLE
		  && trueop1 != CONST0_RTX (mode))
		{
		  REAL_VALUE_TYPE d;
		  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

		  /* x/-1.0 is -x.  */
		  if (REAL_VALUES_EQUAL (d, dconstm1)
		      && !HONOR_SNANS (mode))
		    return simplify_gen_unary (NEG, mode, op0, mode);

		  /* Change FP division by a constant into multiplication.
		     Only do this with -funsafe-math-optimizations.  */
		  if (flag_unsafe_math_optimizations
		      && !REAL_VALUES_EQUAL (d, dconst0))
		    {
		      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
		      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		      return simplify_gen_binary (MULT, mode, op0, tem);
		    }
		}
	    }
	  else
	    {
	      /* 0/x is 0 (or x&0 if x has side-effects).  */
	      if (trueop0 == const0_rtx)
		return side_effects_p (op1)
		       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
		       : const0_rtx;
	      /* x/1 is x.  */
	      if (trueop1 == const1_rtx)
		{
		  /* Handle narrowing DIV.  */
		  rtx x = gen_lowpart_common (mode, op0);
		  if (x)
		    return x;
		  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		    return gen_lowpart_SUBREG (mode, op0);
		  return op0;
		}
	      /* x/-1 is -x.  */
	      if (trueop1 == constm1_rtx)
		{
		  rtx x = gen_lowpart_common (mode, op0);
		  if (!x)
		    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
			? gen_lowpart_SUBREG (mode, op0) : op0;
		  return simplify_gen_unary (NEG, mode, x, mode);
		}
	    }
	  break;
1995 case UMOD:
1996 /* 0%x is 0 (or x&0 if x has side-effects). */
1997 if (trueop0 == const0_rtx)
1998 return side_effects_p (op1)
1999 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2000 : const0_rtx;
2001 /* x%1 is 0 (or x&0 if x has side-effects). */
2002 if (trueop1 == const1_rtx)
2003 return side_effects_p (op0)
2004 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2005 : const0_rtx;
2006 /* Implement modulus by power of two as AND. */
2007 if (GET_CODE (trueop1) == CONST_INT
2008 && exact_log2 (INTVAL (trueop1)) > 0)
2009 return simplify_gen_binary (AND, mode, op0,
2010 GEN_INT (INTVAL (op1) - 1));
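/* Editorial illustration, not in the original source: for unsigned X,
   X % 8 == X & 7, so (umod:SI X (const_int 8)) becomes
   (and:SI X (const_int 7)); the guard exact_log2 (8) == 3 > 0
   confirms that 8 is a power of two.  */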
2011 break;
2013 case MOD:
2014 /* 0%x is 0 (or x&0 if x has side-effects). */
2015 if (trueop0 == const0_rtx)
2016 return side_effects_p (op1)
2017 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2018 : const0_rtx;
2019 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2020 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2021 return side_effects_p (op0)
2022 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2023 : const0_rtx;
2024 break;
2026 case ROTATERT:
2027 case ROTATE:
2028 case ASHIFTRT:
2029 /* Rotating ~0 always results in ~0. */
2030 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2031 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2032 && ! side_effects_p (op1))
2033 return op0;
2035 /* Fall through.... */
2037 case ASHIFT:
2038 case LSHIFTRT:
2039 if (trueop1 == const0_rtx)
2040 return op0;
2041 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2042 return op0;
2043 break;
2045 case SMIN:
2046 if (width <= HOST_BITS_PER_WIDE_INT
2047 && GET_CODE (trueop1) == CONST_INT
2048 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2049 && ! side_effects_p (op0))
2050 return op1;
2051 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2052 return op0;
2053 tem = simplify_associative_operation (code, mode, op0, op1);
2054 if (tem)
2055 return tem;
2056 break;
2058 case SMAX:
2059 if (width <= HOST_BITS_PER_WIDE_INT
2060 && GET_CODE (trueop1) == CONST_INT
2061 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2062 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2063 && ! side_effects_p (op0))
2064 return op1;
2065 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2066 return op0;
2067 tem = simplify_associative_operation (code, mode, op0, op1);
2068 if (tem)
2069 return tem;
2070 break;
2072 case UMIN:
2073 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2074 return op1;
2075 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2076 return op0;
2077 tem = simplify_associative_operation (code, mode, op0, op1);
2078 if (tem)
2079 return tem;
2080 break;
2082 case UMAX:
2083 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2084 return op1;
2085 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2086 return op0;
2087 tem = simplify_associative_operation (code, mode, op0, op1);
2088 if (tem)
2089 return tem;
2090 break;
2092 case SS_PLUS:
2093 case US_PLUS:
2094 case SS_MINUS:
2095 case US_MINUS:
2096 /* ??? There are simplifications that can be done. */
2097 return 0;
2099 case VEC_SELECT:
2100 if (!VECTOR_MODE_P (mode))
2102 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2103 || (mode
2104 != GET_MODE_INNER (GET_MODE (trueop0)))
2105 || GET_CODE (trueop1) != PARALLEL
2106 || XVECLEN (trueop1, 0) != 1
2107 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
2108 abort ();
2110 if (GET_CODE (trueop0) == CONST_VECTOR)
2111 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2113 else
2115 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2116 || (GET_MODE_INNER (mode)
2117 != GET_MODE_INNER (GET_MODE (trueop0)))
2118 || GET_CODE (trueop1) != PARALLEL)
2119 abort ();
2121 if (GET_CODE (trueop0) == CONST_VECTOR)
2123 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2124 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2125 rtvec v = rtvec_alloc (n_elts);
2126 unsigned int i;
2128 if (XVECLEN (trueop1, 0) != (int) n_elts)
2129 abort ();
2130 for (i = 0; i < n_elts; i++)
2132 rtx x = XVECEXP (trueop1, 0, i);
2134 if (GET_CODE (x) != CONST_INT)
2135 abort ();
2136 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2139 return gen_rtx_CONST_VECTOR (mode, v);
2142 return 0;
2143 case VEC_CONCAT:
2145 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2146 ? GET_MODE (trueop0)
2147 : GET_MODE_INNER (mode));
2148 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2149 ? GET_MODE (trueop1)
2150 : GET_MODE_INNER (mode));
2152 if (!VECTOR_MODE_P (mode)
2153 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2154 != GET_MODE_SIZE (mode)))
2155 abort ();
2157 if ((VECTOR_MODE_P (op0_mode)
2158 && (GET_MODE_INNER (mode)
2159 != GET_MODE_INNER (op0_mode)))
2160 || (!VECTOR_MODE_P (op0_mode)
2161 && GET_MODE_INNER (mode) != op0_mode))
2162 abort ();
2164 if ((VECTOR_MODE_P (op1_mode)
2165 && (GET_MODE_INNER (mode)
2166 != GET_MODE_INNER (op1_mode)))
2167 || (!VECTOR_MODE_P (op1_mode)
2168 && GET_MODE_INNER (mode) != op1_mode))
2169 abort ();
2171 if ((GET_CODE (trueop0) == CONST_VECTOR
2172 || GET_CODE (trueop0) == CONST_INT
2173 || GET_CODE (trueop0) == CONST_DOUBLE)
2174 && (GET_CODE (trueop1) == CONST_VECTOR
2175 || GET_CODE (trueop1) == CONST_INT
2176 || GET_CODE (trueop1) == CONST_DOUBLE))
2178 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2179 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2180 rtvec v = rtvec_alloc (n_elts);
2181 unsigned int i;
2182 unsigned in_n_elts = 1;
2184 if (VECTOR_MODE_P (op0_mode))
2185 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2186 for (i = 0; i < n_elts; i++)
2188 if (i < in_n_elts)
2190 if (!VECTOR_MODE_P (op0_mode))
2191 RTVEC_ELT (v, i) = trueop0;
2192 else
2193 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2195 else
2197 if (!VECTOR_MODE_P (op1_mode))
2198 RTVEC_ELT (v, i) = trueop1;
2199 else
2200 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2201 i - in_n_elts);
2205 return gen_rtx_CONST_VECTOR (mode, v);
2208 return 0;
2210 default:
2211 abort ();
2214 return 0;
2217 /* Get the integer argument values in two forms:
2218 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2220 arg0 = INTVAL (trueop0);
2221 arg1 = INTVAL (trueop1);
2223 if (width < HOST_BITS_PER_WIDE_INT)
2225 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2226 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2228 arg0s = arg0;
2229 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2230 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2232 arg1s = arg1;
2233 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2234 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2236 else
2238 arg0s = arg0;
2239 arg1s = arg1;
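/* Editorial illustration, not in the original source: with width == 8,
   the bit pattern 0xff yields arg0 == 255 (zero-extended) while
   arg0s == 255 | (-1 << 8) == -1 (sign-extended), two readings of
   the same byte.  */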
2242 /* Compute the value of the arithmetic. */
2244 switch (code)
2246 case PLUS:
2247 val = arg0s + arg1s;
2248 break;
2250 case MINUS:
2251 val = arg0s - arg1s;
2252 break;
2254 case MULT:
2255 val = arg0s * arg1s;
2256 break;
2258 case DIV:
2259 if (arg1s == 0
2260 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2261 && arg1s == -1))
2262 return 0;
2263 val = arg0s / arg1s;
2264 break;
2266 case MOD:
2267 if (arg1s == 0
2268 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2269 && arg1s == -1))
2270 return 0;
2271 val = arg0s % arg1s;
2272 break;
2274 case UDIV:
2275 if (arg1 == 0
2276 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2277 && arg1s == -1))
2278 return 0;
2279 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2280 break;
2282 case UMOD:
2283 if (arg1 == 0
2284 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2285 && arg1s == -1))
2286 return 0;
2287 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2288 break;
2290 case AND:
2291 val = arg0 & arg1;
2292 break;
2294 case IOR:
2295 val = arg0 | arg1;
2296 break;
2298 case XOR:
2299 val = arg0 ^ arg1;
2300 break;
2302 case LSHIFTRT:
2303 /* If shift count is undefined, don't fold it; let the machine do
2304 what it wants. But truncate it if the machine will do that. */
2305 if (arg1 < 0)
2306 return 0;
2308 if (SHIFT_COUNT_TRUNCATED)
2309 arg1 %= width;
2311 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2312 break;
2314 case ASHIFT:
2315 if (arg1 < 0)
2316 return 0;
2318 if (SHIFT_COUNT_TRUNCATED)
2319 arg1 %= width;
2321 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2322 break;
2324 case ASHIFTRT:
2325 if (arg1 < 0)
2326 return 0;
2328 if (SHIFT_COUNT_TRUNCATED)
2329 arg1 %= width;
2331 val = arg0s >> arg1;
2333 /* The bootstrap compiler may not have sign extended the right shift.
2334 Manually extend the sign to ensure the bootstrap cc matches gcc. */
2335 if (arg0s < 0 && arg1 > 0)
2336 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2338 break;
2340 case ROTATERT:
2341 if (arg1 < 0)
2342 return 0;
2344 arg1 %= width;
2345 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2346 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2347 break;
2349 case ROTATE:
2350 if (arg1 < 0)
2351 return 0;
2353 arg1 %= width;
2354 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2355 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2356 break;
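/* Editorial illustration, not in the original source: rotating the
   QImode value 0xb4 left by 3 computes (0xb4 << 3) | (0xb4 >> 5)
   == 0x5a5, which trunc_int_for_mode below masks to the 8-bit
   result 0xa5.  */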
2358 case COMPARE:
2359 /* Do nothing here. */
2360 return 0;
2362 case SMIN:
2363 val = arg0s <= arg1s ? arg0s : arg1s;
2364 break;
2366 case UMIN:
2367 val = ((unsigned HOST_WIDE_INT) arg0
2368 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2369 break;
2371 case SMAX:
2372 val = arg0s > arg1s ? arg0s : arg1s;
2373 break;
2375 case UMAX:
2376 val = ((unsigned HOST_WIDE_INT) arg0
2377 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2378 break;
2380 case SS_PLUS:
2381 case US_PLUS:
2382 case SS_MINUS:
2383 case US_MINUS:
2384 /* ??? There are simplifications that can be done. */
2385 return 0;
2387 default:
2388 abort ();
2391 val = trunc_int_for_mode (val, mode);
2393 return GEN_INT (val);
2396 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2397 PLUS or MINUS.
2399 Rather than test for specific cases, we do this by a brute-force method
2400 and do all possible simplifications until no more changes occur. Then
2401 we rebuild the operation.
2403 If FORCE is true, then always generate the rtx. This is used to
2404 canonicalize stuff emitted from simplify_gen_binary. Note that this
2405 can still fail if the rtx is too complex. It won't fail just because
2406 the result is not 'simpler' than the input, however. */
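/* Editorial sketch of the method, not in the original source:
   (minus (plus A B) (neg C)) is first flattened into the signed
   operand list { +A, +B, +C }, the pairwise loop then folds any
   operands that simplify against each other, and the survivors are
   reassembled left to right as PLUS/MINUS expressions.  */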
2408 struct simplify_plus_minus_op_data
2410 rtx op;
2411 int neg;
2414 static int
2415 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2417 const struct simplify_plus_minus_op_data *d1 = p1;
2418 const struct simplify_plus_minus_op_data *d2 = p2;
2420 return (commutative_operand_precedence (d2->op)
2421 - commutative_operand_precedence (d1->op));
2424 static rtx
2425 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2426 rtx op1, int force)
2428 struct simplify_plus_minus_op_data ops[8];
2429 rtx result, tem;
2430 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2431 int first, changed;
2432 int i, j;
2434 memset (ops, 0, sizeof ops);
2436 /* Set up the two operands and then expand them until nothing has been
2437 changed. If we run out of room in our array, give up; this should
2438 almost never happen. */
2440 ops[0].op = op0;
2441 ops[0].neg = 0;
2442 ops[1].op = op1;
2443 ops[1].neg = (code == MINUS);
2447 changed = 0;
2449 for (i = 0; i < n_ops; i++)
2451 rtx this_op = ops[i].op;
2452 int this_neg = ops[i].neg;
2453 enum rtx_code this_code = GET_CODE (this_op);
2455 switch (this_code)
2457 case PLUS:
2458 case MINUS:
2459 if (n_ops == 7)
2460 return NULL_RTX;
2462 ops[n_ops].op = XEXP (this_op, 1);
2463 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2464 n_ops++;
2466 ops[i].op = XEXP (this_op, 0);
2467 input_ops++;
2468 changed = 1;
2469 break;
2471 case NEG:
2472 ops[i].op = XEXP (this_op, 0);
2473 ops[i].neg = ! this_neg;
2474 changed = 1;
2475 break;
2477 case CONST:
2478 if (n_ops < 7
2479 && GET_CODE (XEXP (this_op, 0)) == PLUS
2480 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2481 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2483 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2484 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2485 ops[n_ops].neg = this_neg;
2486 n_ops++;
2487 input_consts++;
2488 changed = 1;
2490 break;
2492 case NOT:
2493 /* ~a -> (-a - 1) */
2494 if (n_ops != 7)
2496 ops[n_ops].op = constm1_rtx;
2497 ops[n_ops++].neg = this_neg;
2498 ops[i].op = XEXP (this_op, 0);
2499 ops[i].neg = !this_neg;
2500 changed = 1;
2502 break;
2504 case CONST_INT:
2505 if (this_neg)
2507 ops[i].op = neg_const_int (mode, this_op);
2508 ops[i].neg = 0;
2509 changed = 1;
2511 break;
2513 default:
2514 break;
2518 while (changed);
2520 /* If we only have two operands, we can't do anything. */
2521 if (n_ops <= 2 && !force)
2522 return NULL_RTX;
2524 /* Count the number of CONSTs we didn't split above. */
2525 for (i = 0; i < n_ops; i++)
2526 if (GET_CODE (ops[i].op) == CONST)
2527 input_consts++;
2529 /* Now simplify each pair of operands until nothing changes. The first
2530 time through just simplify constants against each other. */
2532 first = 1;
2535 changed = first;
2537 for (i = 0; i < n_ops - 1; i++)
2538 for (j = i + 1; j < n_ops; j++)
2540 rtx lhs = ops[i].op, rhs = ops[j].op;
2541 int lneg = ops[i].neg, rneg = ops[j].neg;
2543 if (lhs != 0 && rhs != 0
2544 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2546 enum rtx_code ncode = PLUS;
2548 if (lneg != rneg)
2550 ncode = MINUS;
2551 if (lneg)
2552 tem = lhs, lhs = rhs, rhs = tem;
2554 else if (swap_commutative_operands_p (lhs, rhs))
2555 tem = lhs, lhs = rhs, rhs = tem;
2557 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2559 /* Reject "simplifications" that just wrap the two
2560 arguments in a CONST. Failure to do so can result
2561 in infinite recursion with simplify_binary_operation
2562 when it calls us to simplify CONST operations. */
2563 if (tem
2564 && ! (GET_CODE (tem) == CONST
2565 && GET_CODE (XEXP (tem, 0)) == ncode
2566 && XEXP (XEXP (tem, 0), 0) == lhs
2567 && XEXP (XEXP (tem, 0), 1) == rhs)
2568 /* Don't allow -x + -1 -> ~x simplifications in the
2569 first pass. This allows us the chance to combine
2570 the -1 with other constants. */
2571 && ! (first
2572 && GET_CODE (tem) == NOT
2573 && XEXP (tem, 0) == rhs))
2575 lneg &= rneg;
2576 if (GET_CODE (tem) == NEG)
2577 tem = XEXP (tem, 0), lneg = !lneg;
2578 if (GET_CODE (tem) == CONST_INT && lneg)
2579 tem = neg_const_int (mode, tem), lneg = 0;
2581 ops[i].op = tem;
2582 ops[i].neg = lneg;
2583 ops[j].op = NULL_RTX;
2584 changed = 1;
2589 first = 0;
2591 while (changed);
2593 /* Pack all the operands to the lower-numbered entries. */
2594 for (i = 0, j = 0; j < n_ops; j++)
2595 if (ops[j].op)
2596 ops[i++] = ops[j];
2597 n_ops = i;
2599 /* Sort the operations based on swap_commutative_operands_p. */
2600 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2602 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2603 if (n_ops == 2
2604 && GET_CODE (ops[1].op) == CONST_INT
2605 && CONSTANT_P (ops[0].op)
2606 && ops[0].neg)
2607 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2609 /* We suppressed creation of trivial CONST expressions in the
2610 combination loop to avoid recursion. Create one manually now.
2611 The combination loop should have ensured that there is exactly
2612 one CONST_INT, and the sort will have ensured that it is last
2613 in the array and that any other constant will be next-to-last. */
2615 if (n_ops > 1
2616 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2617 && CONSTANT_P (ops[n_ops - 2].op))
2619 rtx value = ops[n_ops - 1].op;
2620 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2621 value = neg_const_int (mode, value);
2622 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2623 n_ops--;
2626 /* Count the number of CONSTs that we generated. */
2627 n_consts = 0;
2628 for (i = 0; i < n_ops; i++)
2629 if (GET_CODE (ops[i].op) == CONST)
2630 n_consts++;
2632 /* Give up if we didn't reduce the number of operands we had. Make
2633 sure we count a CONST as two operands. If we have the same
2634 number of operands, but have made more CONSTs than before, this
2635 is also an improvement, so accept it. */
2636 if (!force
2637 && (n_ops + n_consts > input_ops
2638 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2639 return NULL_RTX;
2641 /* Put a non-negated operand first, if possible. */
2643 for (i = 0; i < n_ops && ops[i].neg; i++)
2644 continue;
2645 if (i == n_ops)
2646 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2647 else if (i != 0)
2649 tem = ops[0].op;
2650 ops[0] = ops[i];
2651 ops[i].op = tem;
2652 ops[i].neg = 1;
2655 /* Now make the result by performing the requested operations. */
2656 result = ops[0].op;
2657 for (i = 1; i < n_ops; i++)
2658 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2659 mode, result, ops[i].op);
2661 return result;
2664 /* Like simplify_binary_operation except used for relational operators.
2665 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2666 also be VOIDmode.
2668 CMP_MODE specifies the mode in which the comparison is done, so it is
2669 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2670 the operands or, if both are VOIDmode, the operands are compared in
2671 "infinite precision". */
2673 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2674 enum machine_mode cmp_mode, rtx op0, rtx op1)
2676 rtx tem, trueop0, trueop1;
2678 if (cmp_mode == VOIDmode)
2679 cmp_mode = GET_MODE (op0);
2680 if (cmp_mode == VOIDmode)
2681 cmp_mode = GET_MODE (op1);
2683 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2684 if (tem)
2686 #ifdef FLOAT_STORE_FLAG_VALUE
2687 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2689 if (tem == const0_rtx)
2690 return CONST0_RTX (mode);
2691 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2693 REAL_VALUE_TYPE val;
2694 val = FLOAT_STORE_FLAG_VALUE (mode);
2695 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2698 #endif
2700 return tem;
2703 /* For the following tests, ensure const0_rtx is op1. */
2704 if (swap_commutative_operands_p (op0, op1)
2705 || (op0 == const0_rtx && op1 != const0_rtx))
2706 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2708 /* If op0 is a compare, extract the comparison arguments from it. */
2709 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2710 return simplify_relational_operation (code, mode, VOIDmode,
2711 XEXP (op0, 0), XEXP (op0, 1));
2713 if (mode == VOIDmode
2714 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2715 || CC0_P (op0))
2716 return NULL_RTX;
2718 trueop0 = avoid_constant_pool_reference (op0);
2719 trueop1 = avoid_constant_pool_reference (op1);
2720 return simplify_relational_operation_1 (code, mode, cmp_mode,
2721 trueop0, trueop1);
2724 /* This part of simplify_relational_operation is only used when CMP_MODE
2725 is not in class MODE_CC (i.e. it is a real comparison).
2727 MODE is the mode of the result, while CMP_MODE specifies the mode
2728 in which the comparison is done, so it is the mode of the operands. */
2730 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2731 enum machine_mode cmp_mode, rtx op0, rtx op1)
2733 if (GET_CODE (op1) == CONST_INT)
2735 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2737 /* If op0 is a comparison, extract the comparison arguments from it. */
2738 if (code == NE)
2740 if (GET_MODE (op0) == cmp_mode)
2741 return simplify_rtx (op0);
2742 else
2743 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2744 XEXP (op0, 0), XEXP (op0, 1));
2746 else if (code == EQ)
2748 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
2749 if (new != UNKNOWN)
2750 return simplify_gen_relational (new, mode, VOIDmode,
2751 XEXP (op0, 0), XEXP (op0, 1));
2756 return NULL_RTX;
2759 /* Check if the given comparison (done in the given MODE) is actually a
2760 tautology or a contradiction.
2761 If no simplification is possible, this function returns zero.
2762 Otherwise, it returns either const_true_rtx or const0_rtx. */
2765 simplify_const_relational_operation (enum rtx_code code,
2766 enum machine_mode mode,
2767 rtx op0, rtx op1)
2769 int equal, op0lt, op0ltu, op1lt, op1ltu;
2770 rtx tem;
2771 rtx trueop0;
2772 rtx trueop1;
2774 if (mode == VOIDmode
2775 && (GET_MODE (op0) != VOIDmode
2776 || GET_MODE (op1) != VOIDmode))
2777 abort ();
2779 /* If op0 is a compare, extract the comparison arguments from it. */
2780 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2781 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2783 /* We can't simplify MODE_CC values since we don't know what the
2784 actual comparison is. */
2785 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2786 return 0;
2788 /* Make sure the constant is second. */
2789 if (swap_commutative_operands_p (op0, op1))
2791 tem = op0, op0 = op1, op1 = tem;
2792 code = swap_condition (code);
2795 trueop0 = avoid_constant_pool_reference (op0);
2796 trueop1 = avoid_constant_pool_reference (op1);
2798 /* For integer comparisons of A and B maybe we can simplify A - B and can
2799 then simplify a comparison of that with zero. If A and B are both either
2800 a register or a CONST_INT, this can't help; testing for these cases will
2801 prevent infinite recursion here and speed things up.
2803 If CODE is an unsigned comparison, then we can never do this optimization,
2804 because it gives an incorrect result if the subtraction wraps around zero.
2805 ANSI C defines unsigned operations such that they never overflow, and
2806 thus such cases cannot be ignored; but we cannot do it even for
2807 signed comparisons for languages such as Java, so test flag_wrapv. */
2809 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2810 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2811 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2812 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2813 /* We cannot do this for == or != if tem is a nonzero address. */
2814 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2815 && code != GTU && code != GEU && code != LTU && code != LEU)
2816 return simplify_const_relational_operation (signed_condition (code),
2817 mode, tem, const0_rtx);
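/* Editorial illustration, not in the original source: comparing
   (plus R (const_int 1)) against (const_int 4) can be recast as
   comparing the folded difference (plus R (const_int -3)) against
   zero; the unsigned codes are excluded above because the
   subtraction may wrap around zero and flip the result.  */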
2819 if (flag_unsafe_math_optimizations && code == ORDERED)
2820 return const_true_rtx;
2822 if (flag_unsafe_math_optimizations && code == UNORDERED)
2823 return const0_rtx;
2825 /* For modes without NaNs, if the two operands are equal, we know the
2826 result except if they have side-effects. */
2827 if (! HONOR_NANS (GET_MODE (trueop0))
2828 && rtx_equal_p (trueop0, trueop1)
2829 && ! side_effects_p (trueop0))
2830 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2832 /* If the operands are floating-point constants, see if we can fold
2833 the result. */
2834 else if (GET_CODE (trueop0) == CONST_DOUBLE
2835 && GET_CODE (trueop1) == CONST_DOUBLE
2836 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2838 REAL_VALUE_TYPE d0, d1;
2840 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2841 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2843 /* Comparisons are unordered iff at least one of the values is NaN. */
2844 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2845 switch (code)
2847 case UNEQ:
2848 case UNLT:
2849 case UNGT:
2850 case UNLE:
2851 case UNGE:
2852 case NE:
2853 case UNORDERED:
2854 return const_true_rtx;
2855 case EQ:
2856 case LT:
2857 case GT:
2858 case LE:
2859 case GE:
2860 case LTGT:
2861 case ORDERED:
2862 return const0_rtx;
2863 default:
2864 return 0;
2867 equal = REAL_VALUES_EQUAL (d0, d1);
2868 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2869 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2872 /* Otherwise, see if the operands are both integers. */
2873 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2874 && (GET_CODE (trueop0) == CONST_DOUBLE
2875 || GET_CODE (trueop0) == CONST_INT)
2876 && (GET_CODE (trueop1) == CONST_DOUBLE
2877 || GET_CODE (trueop1) == CONST_INT))
2879 int width = GET_MODE_BITSIZE (mode);
2880 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2881 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2883 /* Get the two words comprising each integer constant. */
2884 if (GET_CODE (trueop0) == CONST_DOUBLE)
2886 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2887 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2889 else
2891 l0u = l0s = INTVAL (trueop0);
2892 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2895 if (GET_CODE (trueop1) == CONST_DOUBLE)
2897 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2898 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2900 else
2902 l1u = l1s = INTVAL (trueop1);
2903 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2906 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2907 we have to sign or zero-extend the values. */
2908 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2910 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2911 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2913 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2914 l0s |= ((HOST_WIDE_INT) (-1) << width);
2916 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2917 l1s |= ((HOST_WIDE_INT) (-1) << width);
2919 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2920 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2922 equal = (h0u == h1u && l0u == l1u);
2923 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2924 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2925 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2926 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
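/* Editorial note, not in the original source: these are schoolbook
   two-digit comparisons in base 2**HOST_BITS_PER_WIDE_INT: a value
   compares below another iff its high word compares below, or the
   high words are equal and the (always unsigned) low words compare
   below.  */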
2929 /* Otherwise, there are some code-specific tests we can make. */
2930 else
2932 /* Optimize comparisons with upper and lower bounds. */
2933 if (INTEGRAL_MODE_P (mode)
2934 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2936 rtx mmin, mmax;
2937 int sign;
2939 if (code == GEU
2940 || code == LEU
2941 || code == GTU
2942 || code == LTU)
2943 sign = 0;
2944 else
2945 sign = 1;
2947 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
2949 tem = NULL_RTX;
2950 switch (code)
2952 case GEU:
2953 case GE:
2954 /* x >= min is always true. */
2955 if (rtx_equal_p (trueop1, mmin))
2956 tem = const_true_rtx;
2957 else
2958 break;
2960 case LEU:
2961 case LE:
2962 /* x <= max is always true. */
2963 if (rtx_equal_p (trueop1, mmax))
2964 tem = const_true_rtx;
2965 break;
2967 case GTU:
2968 case GT:
2969 /* x > max is always false. */
2970 if (rtx_equal_p (trueop1, mmax))
2971 tem = const0_rtx;
2972 break;
2974 case LTU:
2975 case LT:
2976 /* x < min is always false. */
2977 if (rtx_equal_p (trueop1, mmin))
2978 tem = const0_rtx;
2979 break;
2981 default:
2982 break;
2984 if (tem == const0_rtx
2985 || tem == const_true_rtx)
2986 return tem;
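/* Editorial illustration, not in the original source: for an unsigned
   code in SImode, get_mode_bounds gives mmin == 0, so
   (geu:SI X (const_int 0)) folds to const_true_rtx: x >= 0 is a
   tautology for unsigned x.  */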
2989 switch (code)
2991 case EQ:
2992 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2993 return const0_rtx;
2994 break;
2996 case NE:
2997 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2998 return const_true_rtx;
2999 break;
3001 case LT:
3002 /* Optimize abs(x) < 0.0. */
3003 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3005 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3006 : trueop0;
3007 if (GET_CODE (tem) == ABS)
3008 return const0_rtx;
3010 break;
3012 case GE:
3013 /* Optimize abs(x) >= 0.0. */
3014 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3016 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3017 : trueop0;
3018 if (GET_CODE (tem) == ABS)
3019 return const_true_rtx;
3021 break;
3023 case UNGE:
3024 /* Optimize ! (abs(x) < 0.0). */
3025 if (trueop1 == CONST0_RTX (mode))
3027 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3028 : trueop0;
3029 if (GET_CODE (tem) == ABS)
3030 return const_true_rtx;
3032 break;
3034 default:
3035 break;
3038 return 0;
3041 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3042 as appropriate. */
3043 switch (code)
3045 case EQ:
3046 case UNEQ:
3047 return equal ? const_true_rtx : const0_rtx;
3048 case NE:
3049 case LTGT:
3050 return ! equal ? const_true_rtx : const0_rtx;
3051 case LT:
3052 case UNLT:
3053 return op0lt ? const_true_rtx : const0_rtx;
3054 case GT:
3055 case UNGT:
3056 return op1lt ? const_true_rtx : const0_rtx;
3057 case LTU:
3058 return op0ltu ? const_true_rtx : const0_rtx;
3059 case GTU:
3060 return op1ltu ? const_true_rtx : const0_rtx;
3061 case LE:
3062 case UNLE:
3063 return equal || op0lt ? const_true_rtx : const0_rtx;
3064 case GE:
3065 case UNGE:
3066 return equal || op1lt ? const_true_rtx : const0_rtx;
3067 case LEU:
3068 return equal || op0ltu ? const_true_rtx : const0_rtx;
3069 case GEU:
3070 return equal || op1ltu ? const_true_rtx : const0_rtx;
3071 case ORDERED:
3072 return const_true_rtx;
3073 case UNORDERED:
3074 return const0_rtx;
3075 default:
3076 abort ();
3080 /* Simplify CODE, an operation with result mode MODE and three operands,
3081 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3082 a constant. Return 0 if no simplification is possible. */
3085 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3086 enum machine_mode op0_mode, rtx op0, rtx op1,
3087 rtx op2)
3089 unsigned int width = GET_MODE_BITSIZE (mode);
3091 /* VOIDmode means "infinite" precision. */
3092 if (width == 0)
3093 width = HOST_BITS_PER_WIDE_INT;
3095 switch (code)
3097 case SIGN_EXTRACT:
3098 case ZERO_EXTRACT:
3099 if (GET_CODE (op0) == CONST_INT
3100 && GET_CODE (op1) == CONST_INT
3101 && GET_CODE (op2) == CONST_INT
3102 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3103 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3105 /* Extracting a bit-field from a constant */
3106 HOST_WIDE_INT val = INTVAL (op0);
3108 if (BITS_BIG_ENDIAN)
3109 val >>= (GET_MODE_BITSIZE (op0_mode)
3110 - INTVAL (op2) - INTVAL (op1));
3111 else
3112 val >>= INTVAL (op2);
3114 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3116 /* First zero-extend. */
3117 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3118 /* If desired, propagate sign bit. */
3119 if (code == SIGN_EXTRACT
3120 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3121 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3124 /* Clear the bits that don't belong in our mode,
3125 unless they and our sign bit are all one.
3126 So we get either a reasonable negative value or a reasonable
3127 unsigned value for this mode. */
3128 if (width < HOST_BITS_PER_WIDE_INT
3129 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3130 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3131 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3133 return GEN_INT (val);
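/* Editorial worked example, not in the original source, assuming
   !BITS_BIG_ENDIAN: (zero_extract (const_int 0xb4) (const_int 4)
   (const_int 2)) shifts right by 2 and masks with 0xf, giving
   (0xb4 >> 2) & 0xf == 0xd.  */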
3135 break;
3137 case IF_THEN_ELSE:
3138 if (GET_CODE (op0) == CONST_INT)
3139 return op0 != const0_rtx ? op1 : op2;
3141 /* Convert c ? a : a into "a". */
3142 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3143 return op1;
3145 /* Convert a != b ? a : b into "a". */
3146 if (GET_CODE (op0) == NE
3147 && ! side_effects_p (op0)
3148 && ! HONOR_NANS (mode)
3149 && ! HONOR_SIGNED_ZEROS (mode)
3150 && ((rtx_equal_p (XEXP (op0, 0), op1)
3151 && rtx_equal_p (XEXP (op0, 1), op2))
3152 || (rtx_equal_p (XEXP (op0, 0), op2)
3153 && rtx_equal_p (XEXP (op0, 1), op1))))
3154 return op1;
3156 /* Convert a == b ? a : b into "b". */
3157 if (GET_CODE (op0) == EQ
3158 && ! side_effects_p (op0)
3159 && ! HONOR_NANS (mode)
3160 && ! HONOR_SIGNED_ZEROS (mode)
3161 && ((rtx_equal_p (XEXP (op0, 0), op1)
3162 && rtx_equal_p (XEXP (op0, 1), op2))
3163 || (rtx_equal_p (XEXP (op0, 0), op2)
3164 && rtx_equal_p (XEXP (op0, 1), op1))))
3165 return op2;
3167 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3169 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3170 ? GET_MODE (XEXP (op0, 1))
3171 : GET_MODE (XEXP (op0, 0)));
3172 rtx temp;
3174 /* Look for happy constants in op1 and op2. */
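/* Editorial illustration, not in the original source, assuming
   STORE_FLAG_VALUE == 1: (if_then_else (lt A B) (const_int 1)
   (const_int 0)) collapses to the comparison (lt A B) itself, and
   the 0/1-swapped form becomes the reversed comparison, e.g.
   (ge A B) for integer operands.  */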
3175 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3177 HOST_WIDE_INT t = INTVAL (op1);
3178 HOST_WIDE_INT f = INTVAL (op2);
3180 if (t == STORE_FLAG_VALUE && f == 0)
3181 code = GET_CODE (op0);
3182 else if (t == 0 && f == STORE_FLAG_VALUE)
3184 enum rtx_code tmp;
3185 tmp = reversed_comparison_code (op0, NULL_RTX);
3186 if (tmp == UNKNOWN)
3187 break;
3188 code = tmp;
3190 else
3191 break;
3193 return simplify_gen_relational (code, mode, cmp_mode,
3194 XEXP (op0, 0), XEXP (op0, 1));
3197 if (cmp_mode == VOIDmode)
3198 cmp_mode = op0_mode;
3199 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3200 cmp_mode, XEXP (op0, 0),
3201 XEXP (op0, 1));
3203 /* See if any simplifications were possible. */
3204 if (temp)
3206 if (GET_CODE (temp) == CONST_INT)
3207 return temp == const0_rtx ? op2 : op1;
3208 else if (temp)
3209 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3212 break;
3214 case VEC_MERGE:
3215 if (GET_MODE (op0) != mode
3216 || GET_MODE (op1) != mode
3217 || !VECTOR_MODE_P (mode))
3218 abort ();
3219 op2 = avoid_constant_pool_reference (op2);
3220 if (GET_CODE (op2) == CONST_INT)
3222 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3223 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3224 int mask = (1 << n_elts) - 1;
3226 if (!(INTVAL (op2) & mask))
3227 return op1;
3228 if ((INTVAL (op2) & mask) == mask)
3229 return op0;
3231 op0 = avoid_constant_pool_reference (op0);
3232 op1 = avoid_constant_pool_reference (op1);
3233 if (GET_CODE (op0) == CONST_VECTOR
3234 && GET_CODE (op1) == CONST_VECTOR)
3236 rtvec v = rtvec_alloc (n_elts);
3237 unsigned int i;
3239 for (i = 0; i < n_elts; i++)
3240 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3241 ? CONST_VECTOR_ELT (op0, i)
3242 : CONST_VECTOR_ELT (op1, i));
3243 return gen_rtx_CONST_VECTOR (mode, v);
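/* Editorial illustration, not in the original source: merging two
   V4SI constant vectors under (const_int 5), binary 0101, takes
   elements 0 and 2 from op0 and elements 1 and 3 from op1.  */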
3246 break;
3248 default:
3249 abort ();
3252 return 0;
3255 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3256 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3258 Works by unpacking OP into a collection of 8-bit values
3259 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3260 and then repacking them again for OUTERMODE. */
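/* Editorial illustration, not in the original source, assuming a
   little-endian target: (subreg:HI (const_int 0x12345678) 0) with
   SImode as INNERMODE unpacks to the byte array 78 56 34 12, selects
   the two bytes at offset 0, and repacks them as (const_int 0x5678).  */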
3262 static rtx
3263 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3264 enum machine_mode innermode, unsigned int byte)
3266 /* We support up to 512-bit values (for V8DFmode). */
3267 enum {
3268 max_bitsize = 512,
3269 value_bit = 8,
3270 value_mask = (1 << value_bit) - 1
3272 unsigned char value[max_bitsize / value_bit];
3273 int value_start;
3274 int i;
3275 int elem;
3277 int num_elem;
3278 rtx * elems;
3279 int elem_bitsize;
3280 rtx result_s;
3281 rtvec result_v = NULL;
3282 enum mode_class outer_class;
3283 enum machine_mode outer_submode;
3285 /* Some ports misuse CCmode. */
3286 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3287 return op;
3289 /* Unpack the value. */
3291 if (GET_CODE (op) == CONST_VECTOR)
3293 num_elem = CONST_VECTOR_NUNITS (op);
3294 elems = &CONST_VECTOR_ELT (op, 0);
3295 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3297 else
3299 num_elem = 1;
3300 elems = &op;
3301 elem_bitsize = max_bitsize;
3304 if (BITS_PER_UNIT % value_bit != 0)
3305 abort (); /* Too complicated; reducing value_bit may help. */
3306 if (elem_bitsize % BITS_PER_UNIT != 0)
3307 abort (); /* I don't know how to handle endianness of sub-units. */
3309 for (elem = 0; elem < num_elem; elem++)
3311 unsigned char * vp;
3312 rtx el = elems[elem];
3314 /* Vectors are kept in target memory order. (This is probably
3315 a mistake.) */
3317 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3318 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3319 / BITS_PER_UNIT);
3320 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3321 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3322 unsigned bytele = (subword_byte % UNITS_PER_WORD
3323 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3324 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3327 switch (GET_CODE (el))
3329 case CONST_INT:
3330 for (i = 0;
3331 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3332 i += value_bit)
3333 *vp++ = INTVAL (el) >> i;
3334 /* CONST_INTs are always logically sign-extended. */
3335 for (; i < elem_bitsize; i += value_bit)
3336 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3337 break;
3339 case CONST_DOUBLE:
3340 if (GET_MODE (el) == VOIDmode)
3342 /* If this triggers, someone should have generated a
3343 CONST_INT instead. */
3344 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3345 abort ();
3347 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3348 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3349 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3351 *vp++
3352 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3353 i += value_bit;
3355 /* It shouldn't matter what's done here, so fill it with
3356 zero. */
3357 for (; i < max_bitsize; i += value_bit)
3358 *vp++ = 0;
3360 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3362 long tmp[max_bitsize / 32];
3363 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3365 if (bitsize > elem_bitsize)
3366 abort ();
3367 if (bitsize % value_bit != 0)
3368 abort ();
3370 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3371 GET_MODE (el));
3373 /* real_to_target produces its result in words affected by
3374 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3375 and use WORDS_BIG_ENDIAN instead; see the documentation
3376 of SUBREG in rtl.texi. */
3377 for (i = 0; i < bitsize; i += value_bit)
3379 int ibase;
3380 if (WORDS_BIG_ENDIAN)
3381 ibase = bitsize - 1 - i;
3382 else
3383 ibase = i;
3384 *vp++ = tmp[ibase / 32] >> i % 32;
3387 /* It shouldn't matter what's done here, so fill it with
3388 zero. */
3389 for (; i < elem_bitsize; i += value_bit)
3390 *vp++ = 0;
3392 else
3393 abort ();
3394 break;
3396 default:
3397 abort ();
3401 /* Now, pick the right byte to start with. */
3402 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3403 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3404 will already have offset 0. */
3405 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3407 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3408 - byte);
3409 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3410 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3411 byte = (subword_byte % UNITS_PER_WORD
3412 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3415 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3416 so if it's become negative it will instead be very large.) */
3417 if (byte >= GET_MODE_SIZE (innermode))
3418 abort ();
3420 /* Convert from bytes to chunks of size value_bit. */
3421 value_start = byte * (BITS_PER_UNIT / value_bit);
3423 /* Re-pack the value. */
3425 if (VECTOR_MODE_P (outermode))
3427 num_elem = GET_MODE_NUNITS (outermode);
3428 result_v = rtvec_alloc (num_elem);
3429 elems = &RTVEC_ELT (result_v, 0);
3430 outer_submode = GET_MODE_INNER (outermode);
3432 else
3434 num_elem = 1;
3435 elems = &result_s;
3436 outer_submode = outermode;
3439 outer_class = GET_MODE_CLASS (outer_submode);
3440 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3442 if (elem_bitsize % value_bit != 0)
3443 abort ();
3444 if (elem_bitsize + value_start * value_bit > max_bitsize)
3445 abort ();
3447 for (elem = 0; elem < num_elem; elem++)
3449 unsigned char *vp;
3451 /* Vectors are stored in target memory order. (This is probably
3452 a mistake.) */
3454 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3455 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3456 / BITS_PER_UNIT);
3457 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3458 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3459 unsigned bytele = (subword_byte % UNITS_PER_WORD
3460 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3461 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3464 switch (outer_class)
3466 case MODE_INT:
3467 case MODE_PARTIAL_INT:
3469 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3471 for (i = 0;
3472 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3473 i += value_bit)
3474 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3475 for (; i < elem_bitsize; i += value_bit)
3476 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3477 << (i - HOST_BITS_PER_WIDE_INT));
3479 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3480 know why. */
3481 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3482 elems[elem] = gen_int_mode (lo, outer_submode);
3483 else
3484 elems[elem] = immed_double_const (lo, hi, outer_submode);
3486 break;
3488 case MODE_FLOAT:
3490 REAL_VALUE_TYPE r;
3491 long tmp[max_bitsize / 32];
3493 /* real_from_target wants its input in words affected by
3494 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3495 and use WORDS_BIG_ENDIAN instead; see the documentation
3496 of SUBREG in rtl.texi. */
3497 for (i = 0; i < max_bitsize / 32; i++)
3498 tmp[i] = 0;
3499 for (i = 0; i < elem_bitsize; i += value_bit)
3501 int ibase;
3502 if (WORDS_BIG_ENDIAN)
3503 ibase = elem_bitsize - 1 - i;
3504 else
3505 ibase = i;
3506 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3509 real_from_target (&r, tmp, outer_submode);
3510 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3512 break;
3514 default:
3515 abort ();
3518 if (VECTOR_MODE_P (outermode))
3519 return gen_rtx_CONST_VECTOR (outermode, result_v);
3520 else
3521 return result_s;
3524 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3525 Return 0 if no simplifications are possible. */
3527 simplify_subreg (enum machine_mode outermode, rtx op,
3528 enum machine_mode innermode, unsigned int byte)
3530 /* Little bit of sanity checking. */
3531 if (innermode == VOIDmode || outermode == VOIDmode
3532 || innermode == BLKmode || outermode == BLKmode)
3533 abort ();
3535 if (GET_MODE (op) != innermode
3536 && GET_MODE (op) != VOIDmode)
3537 abort ();
3539 if (byte % GET_MODE_SIZE (outermode)
3540 || byte >= GET_MODE_SIZE (innermode))
3541 abort ();
3543 if (outermode == innermode && !byte)
3544 return op;
3546 if (GET_CODE (op) == CONST_INT
3547 || GET_CODE (op) == CONST_DOUBLE
3548 || GET_CODE (op) == CONST_VECTOR)
3549 return simplify_immed_subreg (outermode, op, innermode, byte);
3551 /* Changing mode twice with SUBREG => just change it once,
3552 or not at all if changing back to the starting mode. */
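/* Editorial illustration, not in the original source: on a
   little-endian target, (subreg:QI (subreg:HI (reg:SI R) 0) 0)
   collapses to (subreg:QI (reg:SI R) 0); the two byte offsets
   simply add.  */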
3553 if (GET_CODE (op) == SUBREG)
3555 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3556 int final_offset = byte + SUBREG_BYTE (op);
3557 rtx new;
3559 if (outermode == innermostmode
3560 && byte == 0 && SUBREG_BYTE (op) == 0)
3561 return SUBREG_REG (op);
3563 /* The SUBREG_BYTE represents the offset, as if the value were stored
3564 in memory. An irritating exception is a paradoxical subreg, where
3565 we define SUBREG_BYTE to be 0. On big-endian machines, this
3566 value should be negative. For a moment, undo this exception. */
3567 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3569 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3570 if (WORDS_BIG_ENDIAN)
3571 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3572 if (BYTES_BIG_ENDIAN)
3573 final_offset += difference % UNITS_PER_WORD;
3575 if (SUBREG_BYTE (op) == 0
3576 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3578 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3579 if (WORDS_BIG_ENDIAN)
3580 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3581 if (BYTES_BIG_ENDIAN)
3582 final_offset += difference % UNITS_PER_WORD;
3585 /* See whether resulting subreg will be paradoxical. */
3586 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3588 /* In nonparadoxical subregs we can't handle negative offsets. */
3589 if (final_offset < 0)
3590 return NULL_RTX;
3591 /* Bail out in case resulting subreg would be incorrect. */
3592 if (final_offset % GET_MODE_SIZE (outermode)
3593 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3594 return NULL_RTX;
3596 else
3598 int offset = 0;
3599 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3601 /* In a paradoxical subreg, see if we are still looking at the lower part.
3602 If so, our SUBREG_BYTE will be 0. */
3603 if (WORDS_BIG_ENDIAN)
3604 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3605 if (BYTES_BIG_ENDIAN)
3606 offset += difference % UNITS_PER_WORD;
3607 if (offset == final_offset)
3608 final_offset = 0;
3609 else
3610 return NULL_RTX;
3613 /* Recurse for further possible simplifications. */
3614 new = simplify_subreg (outermode, SUBREG_REG (op),
3615 GET_MODE (SUBREG_REG (op)),
3616 final_offset);
3617 if (new)
3618 return new;
3619 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3622 /* SUBREG of a hard register => just change the register number
3623 and/or mode. If the hard register is not valid in that mode,
3624 suppress this simplification. If the hard register is the stack,
3625 frame, or argument pointer, leave this as a SUBREG. */
3627 if (REG_P (op)
3628 && (! REG_FUNCTION_VALUE_P (op)
3629 || ! rtx_equal_function_value_matters)
3630 && REGNO (op) < FIRST_PSEUDO_REGISTER
3631 #ifdef CANNOT_CHANGE_MODE_CLASS
3632 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3633 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3634 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3635 #endif
3636 && ((reload_completed && !frame_pointer_needed)
3637 || (REGNO (op) != FRAME_POINTER_REGNUM
3638 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3639 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3640 #endif
3642 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3643 && REGNO (op) != ARG_POINTER_REGNUM
3644 #endif
3645 && REGNO (op) != STACK_POINTER_REGNUM
3646 && subreg_offset_representable_p (REGNO (op), innermode,
3647 byte, outermode))
3649 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3650 int final_regno = subreg_hard_regno (tem, 0);
3652 /* ??? We do allow it if the current REG is not valid for
3653 its mode. This is a kludge to work around how float/complex
3654 arguments are passed on 32-bit SPARC and should be fixed. */
3655 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3656 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3658 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3660 /* Propagate original regno. We don't have any way to specify
3661 the offset inside original regno, so do so only for lowpart.
3662 The information is used only by alias analysis, which cannot
3663 grok partial registers anyway. */
3665 if (subreg_lowpart_offset (outermode, innermode) == byte)
3666 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3667 return x;
3671 /* If we have a SUBREG of a register that we are replacing and we are
3672 replacing it with a MEM, make a new MEM and try replacing the
3673 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3674 or if we would be widening it. */
3676 if (GET_CODE (op) == MEM
3677 && ! mode_dependent_address_p (XEXP (op, 0))
3678 /* Allow splitting of volatile memory references in case we don't
3679 have instruction to move the whole thing. */
3680 && (! MEM_VOLATILE_P (op)
3681 || ! have_insn_for (SET, innermode))
3682 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3683 return adjust_address_nv (op, outermode, byte);
3685 /* Handle complex values represented as CONCAT
3686 of real and imaginary part. */
3687 if (GET_CODE (op) == CONCAT)
3689 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3690 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3691 unsigned int final_offset;
3692 rtx res;
3694 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3695 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3696 if (res)
3697 return res;
3698 /* We can at least simplify it by referring directly to the
3699 relevant part. */
3700 return gen_rtx_SUBREG (outermode, part, final_offset);
3703 /* Optimize SUBREG truncations of zero and sign extended values. */
3704 if ((GET_CODE (op) == ZERO_EXTEND
3705 || GET_CODE (op) == SIGN_EXTEND)
3706 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3708 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3710 /* If we're requesting the lowpart of a zero or sign extension,
3711 there are three possibilities. If the outermode is the same
3712 as the origmode, we can omit both the extension and the subreg.
3713 If the outermode is not larger than the origmode, we can apply
3714 the truncation without the extension. Finally, if the outermode
3715 is larger than the origmode, but both are integer modes, we
3716 can just extend to the appropriate mode. */
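/* Editorial illustration, not in the original source: on a
   little-endian target, the lowpart (subreg:HI (zero_extend:SI
   (reg:HI R)) 0) returns (reg:HI R) directly, since OUTERMODE equals
   the extension's source mode and both operations cancel.  */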
3717 if (bitpos == 0)
3719 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3720 if (outermode == origmode)
3721 return XEXP (op, 0);
3722 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3723 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3724 subreg_lowpart_offset (outermode,
3725 origmode));
3726 if (SCALAR_INT_MODE_P (outermode))
3727 return simplify_gen_unary (GET_CODE (op), outermode,
3728 XEXP (op, 0), origmode);
3731 /* A SUBREG resulting from a zero extension may fold to zero if
3732 it extracts higher bits than the ZERO_EXTEND's source provides. */
3733 if (GET_CODE (op) == ZERO_EXTEND
3734 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3735 return CONST0_RTX (outermode);
3738 return NULL_RTX;
3741 /* Make a SUBREG operation or equivalent if it folds. */
3744 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3745 enum machine_mode innermode, unsigned int byte)
3747 rtx new;
3748 /* Little bit of sanity checking. */
3749 if (innermode == VOIDmode || outermode == VOIDmode
3750 || innermode == BLKmode || outermode == BLKmode)
3751 abort ();
3753 if (GET_MODE (op) != innermode
3754 && GET_MODE (op) != VOIDmode)
3755 abort ();
3757 if (byte % GET_MODE_SIZE (outermode)
3758 || byte >= GET_MODE_SIZE (innermode))
3759 abort ();
3761 if (GET_CODE (op) == QUEUED)
3762 return NULL_RTX;
3764 new = simplify_subreg (outermode, op, innermode, byte);
3765 if (new)
3766 return new;
3768 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3769 return NULL_RTX;
3771 return gen_rtx_SUBREG (outermode, op, byte);
3773 /* Simplify X, an rtx expression.
3775 Return the simplified expression or NULL if no simplifications
3776 were possible.
3778 This is the preferred entry point into the simplification routines;
3779 however, we still allow passes to call the more specific routines.
3781 Right now GCC has three (yes, three) major bodies of RTL simplification
3782 code that need to be unified.
3784 1. fold_rtx in cse.c. This code uses various CSE specific
3785 information to aid in RTL simplification.
3787 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3788 it uses combine specific information to aid in RTL
3789 simplification.
3791 3. The routines in this file.
3794 Long term we want to only have one body of simplification code; to
3795 get to that state I recommend the following steps:
3797 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3798 which do not depend on pass-specific state into these routines.
3800 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3801 use this routine whenever possible.
3803 3. Allow for pass dependent state to be provided to these
3804 routines and add simplifications based on the pass dependent
3805 state. Remove code from cse.c & combine.c that becomes
3806 redundant/dead.
3808 It will take time, but ultimately the compiler will be easier to
3809 maintain and improve. It's totally silly that when we add a
3810 simplification it needs to be added to 4 places (3 for RTL
3811 simplification and 1 for tree simplification). */
3814 simplify_rtx (rtx x)
3816 enum rtx_code code = GET_CODE (x);
3817 enum machine_mode mode = GET_MODE (x);
3819 switch (GET_RTX_CLASS (code))
3821 case RTX_UNARY:
3822 return simplify_unary_operation (code, mode,
3823 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3824 case RTX_COMM_ARITH:
3825 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3826 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3828 /* Fall through.... */
3830 case RTX_BIN_ARITH:
3831 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3833 case RTX_TERNARY:
3834 case RTX_BITFIELD_OPS:
3835 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3836 XEXP (x, 0), XEXP (x, 1),
3837 XEXP (x, 2));
3839 case RTX_COMPARE:
3840 case RTX_COMM_COMPARE:
3841 return simplify_relational_operation (code, mode,
3842 ((GET_MODE (XEXP (x, 0))
3843 != VOIDmode)
3844 ? GET_MODE (XEXP (x, 0))
3845 : GET_MODE (XEXP (x, 1))),
3846 XEXP (x, 0),
3847 XEXP (x, 1));
3849 case RTX_EXTRA:
3850 if (code == SUBREG)
3851 return simplify_gen_subreg (mode, SUBREG_REG (x),
3852 GET_MODE (SUBREG_REG (x)),
3853 SUBREG_BYTE (x));
3854 break;
3856 case RTX_OBJ:
3857 if (code == LO_SUM)
3859 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3860 if (GET_CODE (XEXP (x, 0)) == HIGH
3861 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3862 return XEXP (x, 1);
3864 break;
3866 default:
3867 break;
3869 return NULL;