/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
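
/* Illustrative sketch (not in the original source): a double-width
   value is carried as a (low, high) pair, and HWI_SIGN_EXTEND supplies
   the high half when a signed low half is widened.  */
#if 0
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);  /* -1, so the pair encodes -5.  */
#endif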

static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
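
/* Worked example (illustrative, not in the original source): in QImode
   the most negative value is -128, and - (-128) wraps back to -128 in an
   8-bit two's complement mode; gen_int_mode performs that truncation, so
   the call below yields (const_int -128) rather than an out-of-mode 128.  */
#if 0
  rtx n = neg_const_int (QImode, GEN_INT (-128));
#endif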

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
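
/* Usage sketch (illustrative, not in the original source): constant
   operands fold immediately, so the first call returns (const_int 5);
   the second merely builds a canonically ordered PLUS rtx.  */
#if 0
  rtx c = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));
  rtx e = simplify_gen_binary (PLUS, SImode, const1_rtx, reg);  /* reg: some REG rtx (assumed).  */
#endif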

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
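
/* Usage sketch (illustrative, not in the original source): for a MEM
   whose address is a constant-pool SYMBOL_REF this returns the pooled
   constant (converted with simplify_subreg if the access mode differs);
   any rtx that is not such a reference comes back unchanged.  */
#if 0
  rtx folded = avoid_constant_pool_reference (x);  /* x: some rtx (assumed).  */
#endif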

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
        {
#ifdef FLOAT_STORE_FLAG_VALUE
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              REAL_VALUE_TYPE val;
              if (tem == const0_rtx)
                return CONST0_RTX (mode);
              if (tem != const_true_rtx)
                abort ();
              val = FLOAT_STORE_FLAG_VALUE (mode);
              return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
            }
#endif
          return tem;
        }
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;
          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
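
/* Usage sketch (illustrative, not in the original source): a comparison
   of a COMPARE against zero collapses to a comparison of the COMPARE's
   own arguments, so the call below simplifies to (eq:SI a b).  */
#if 0
  rtx cmp = gen_rtx_COMPARE (CCmode, a, b);  /* a, b: some rtxes (assumed).  */
  rtx res = simplify_gen_relational (EQ, SImode, VOIDmode, cmp, const0_rtx);
#endif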

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case 'o':
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
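
/* Usage sketch (illustrative, not in the original source): replacing a
   register inside an expression re-simplifies the result, so with
   x = (plus:SI r (const_int 1)) the call below folds to (const_int 5).  */
#if 0
  rtx folded = simplify_replace_rtx (x, r, GEN_INT (4));  /* r: some REG rtx (assumed).  */
#endif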

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;

        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
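
/* Usage sketch (illustrative, not in the original source): constant
   operands fold arithmetically, e.g. NEG of 9 gives (const_int -9), and
   some non-constant forms simplify structurally, e.g. (not (not X))
   returns X.  */
#if 0
  rtx m = simplify_unary_operation (NEG, SImode, GEN_INT (9), SImode);
#endif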

/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
         || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
        return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
                                         XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
        return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
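
/* Worked example (illustrative, not in the original source): given
   op0 = (plus:SI x (const_int 1)) and op1 = (const_int 2), the first
   rule above combines the constants and returns (plus:SI x (const_int 3)).  */
#if 0
  rtx t = simplify_associative_operation (PLUS, SImode, op0, op1);
#endif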

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
          && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
        return 0;

      if (code == DIV
          && REAL_VALUES_EQUAL (f1, dconst0)
          && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */
        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:  case ASHIFTRT:
        case ASHIFT:
        case ROTATE:    case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));

          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;
        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          abort ();
        }

      return 0;
    }
2031 /* Get the integer argument values in two forms:
2032 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
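/* For example, in an 8-bit mode with trueop1 == (const_int -1), the
   masking below yields arg1 == 0xff (zero-extended) while arg1s is
   re-extended to -1, so unsigned and signed operators can each use
   the form they need. */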
2034 arg0 = INTVAL (trueop0);
2035 arg1 = INTVAL (trueop1);
2037 if (width < HOST_BITS_PER_WIDE_INT)
2039 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2040 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2042 arg0s = arg0;
2043 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2044 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2046 arg1s = arg1;
2047 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2048 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2050 else
2052 arg0s = arg0;
2053 arg1s = arg1;
2056 /* Compute the value of the arithmetic. */
2058 switch (code)
2060 case PLUS:
2061 val = arg0s + arg1s;
2062 break;
2064 case MINUS:
2065 val = arg0s - arg1s;
2066 break;
2068 case MULT:
2069 val = arg0s * arg1s;
2070 break;
2072 case DIV:
2073 if (arg1s == 0
2074 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2075 && arg1s == -1))
2076 return 0;
2077 val = arg0s / arg1s;
2078 break;
2080 case MOD:
2081 if (arg1s == 0
2082 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2083 && arg1s == -1))
2084 return 0;
2085 val = arg0s % arg1s;
2086 break;
2088 case UDIV:
2089 if (arg1 == 0
2090 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2091 && arg1s == -1))
2092 return 0;
2093 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2094 break;
2096 case UMOD:
2097 if (arg1 == 0
2098 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2099 && arg1s == -1))
2100 return 0;
2101 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2102 break;
2104 case AND:
2105 val = arg0 & arg1;
2106 break;
2108 case IOR:
2109 val = arg0 | arg1;
2110 break;
2112 case XOR:
2113 val = arg0 ^ arg1;
2114 break;
2116 case LSHIFTRT:
2117 /* If shift count is undefined, don't fold it; let the machine do
2118 what it wants. But truncate it if the machine will do that. */
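/* For instance, with width == 32 on a target where
   SHIFT_COUNT_TRUNCATED is nonzero, a count of 33 is folded as if it
   were 1; on other targets an out-of-range count is left for the
   machine to define. */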
2119 if (arg1 < 0)
2120 return 0;
2122 #ifdef SHIFT_COUNT_TRUNCATED
2123 if (SHIFT_COUNT_TRUNCATED)
2124 arg1 %= width;
2125 #endif
2127 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2128 break;
2130 case ASHIFT:
2131 if (arg1 < 0)
2132 return 0;
2134 #ifdef SHIFT_COUNT_TRUNCATED
2135 if (SHIFT_COUNT_TRUNCATED)
2136 arg1 %= width;
2137 #endif
2139 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2140 break;
2142 case ASHIFTRT:
2143 if (arg1 < 0)
2144 return 0;
2146 #ifdef SHIFT_COUNT_TRUNCATED
2147 if (SHIFT_COUNT_TRUNCATED)
2148 arg1 %= width;
2149 #endif
2151 val = arg0s >> arg1;
2153 /* Bootstrap compiler may not have sign extended the right shift.
2154 Manually extend the sign to ensure bootstrap cc matches gcc. */
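/* E.g. with arg0s == -4 and arg1 == 1 we want -2; if the host's
   signed '>>' shifted in zero bits instead of sign bits, the OR
   below restores the missing copies of the sign bit. */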
2155 if (arg0s < 0 && arg1 > 0)
2156 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2158 break;
2160 case ROTATERT:
2161 if (arg1 < 0)
2162 return 0;
2164 arg1 %= width;
2165 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2166 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2167 break;
2169 case ROTATE:
2170 if (arg1 < 0)
2171 return 0;
2173 arg1 %= width;
2174 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2175 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2176 break;
2178 case COMPARE:
2179 /* Do nothing here. */
2180 return 0;
2182 case SMIN:
2183 val = arg0s <= arg1s ? arg0s : arg1s;
2184 break;
2186 case UMIN:
2187 val = ((unsigned HOST_WIDE_INT) arg0
2188 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2189 break;
2191 case SMAX:
2192 val = arg0s > arg1s ? arg0s : arg1s;
2193 break;
2195 case UMAX:
2196 val = ((unsigned HOST_WIDE_INT) arg0
2197 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2198 break;
2200 case SS_PLUS:
2201 case US_PLUS:
2202 case SS_MINUS:
2203 case US_MINUS:
2204 /* ??? There are simplifications that can be done. */
2205 return 0;
2207 default:
2208 abort ();
2211 val = trunc_int_for_mode (val, mode);
2213 return GEN_INT (val);
2216 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2217 PLUS or MINUS.
2219 Rather than test for specific cases, we do this by a brute-force method
2220 and do all possible simplifications until no more changes occur. Then
2221 we rebuild the operation.
2223 If FORCE is true, then always generate the rtx. This is used to
2224 canonicalize stuff emitted from simplify_gen_binary. Note that this
2225 can still fail if the rtx is too complex. It won't fail just because
2226 the result is not 'simpler' than the input, however. */
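/* An illustrative run (not taken from real input): given
   (plus (minus a b) (plus (neg a) c)), the operands are flattened
   into the list {a, -b, -a, c}; the a/-a pair cancels in the
   combination loop, and the result is rebuilt as (minus c b). */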
2228 struct simplify_plus_minus_op_data
2230 rtx op;
2231 int neg;
2234 static int
2235 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2237 const struct simplify_plus_minus_op_data *d1 = p1;
2238 const struct simplify_plus_minus_op_data *d2 = p2;
2240 return (commutative_operand_precedence (d2->op)
2241 - commutative_operand_precedence (d1->op));
2244 static rtx
2245 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2246 rtx op1, int force)
2248 struct simplify_plus_minus_op_data ops[8];
2249 rtx result, tem;
2250 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2251 int first, negate, changed;
2252 int i, j;
2254 memset (ops, 0, sizeof ops);
2256 /* Set up the two operands and then expand them until nothing has been
2257 changed. If we run out of room in our array, give up; this should
2258 almost never happen. */
2260 ops[0].op = op0;
2261 ops[0].neg = 0;
2262 ops[1].op = op1;
2263 ops[1].neg = (code == MINUS);
2267 changed = 0;
2269 for (i = 0; i < n_ops; i++)
2271 rtx this_op = ops[i].op;
2272 int this_neg = ops[i].neg;
2273 enum rtx_code this_code = GET_CODE (this_op);
2275 switch (this_code)
2277 case PLUS:
2278 case MINUS:
2279 if (n_ops == 7)
2280 return NULL_RTX;
2282 ops[n_ops].op = XEXP (this_op, 1);
2283 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2284 n_ops++;
2286 ops[i].op = XEXP (this_op, 0);
2287 input_ops++;
2288 changed = 1;
2289 break;
2291 case NEG:
2292 ops[i].op = XEXP (this_op, 0);
2293 ops[i].neg = ! this_neg;
2294 changed = 1;
2295 break;
2297 case CONST:
2298 if (n_ops < 7
2299 && GET_CODE (XEXP (this_op, 0)) == PLUS
2300 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2301 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2303 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2304 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2305 ops[n_ops].neg = this_neg;
2306 n_ops++;
2307 input_consts++;
2308 changed = 1;
2310 break;
2312 case NOT:
2313 /* ~a -> (-a - 1) */
2314 if (n_ops != 7)
2316 ops[n_ops].op = constm1_rtx;
2317 ops[n_ops++].neg = this_neg;
2318 ops[i].op = XEXP (this_op, 0);
2319 ops[i].neg = !this_neg;
2320 changed = 1;
2322 break;
2324 case CONST_INT:
2325 if (this_neg)
2327 ops[i].op = neg_const_int (mode, this_op);
2328 ops[i].neg = 0;
2329 changed = 1;
2331 break;
2333 default:
2334 break;
2338 while (changed);
2340 /* If we only have two operands, we can't do anything. */
2341 if (n_ops <= 2 && !force)
2342 return NULL_RTX;
2344 /* Count the number of CONSTs we didn't split above. */
2345 for (i = 0; i < n_ops; i++)
2346 if (GET_CODE (ops[i].op) == CONST)
2347 input_consts++;
2349 /* Now simplify each pair of operands until nothing changes. The first
2350 time through just simplify constants against each other. */
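/* E.g. on the first pass {x, (const_int 4), (const_int -1)} becomes
   {x, (const_int 3)}; pairs involving non-constants are only tried
   on later passes, after the constants have been folded together. */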
2352 first = 1;
2355 changed = first;
2357 for (i = 0; i < n_ops - 1; i++)
2358 for (j = i + 1; j < n_ops; j++)
2360 rtx lhs = ops[i].op, rhs = ops[j].op;
2361 int lneg = ops[i].neg, rneg = ops[j].neg;
2363 if (lhs != 0 && rhs != 0
2364 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2366 enum rtx_code ncode = PLUS;
2368 if (lneg != rneg)
2370 ncode = MINUS;
2371 if (lneg)
2372 tem = lhs, lhs = rhs, rhs = tem;
2374 else if (swap_commutative_operands_p (lhs, rhs))
2375 tem = lhs, lhs = rhs, rhs = tem;
2377 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2379 /* Reject "simplifications" that just wrap the two
2380 arguments in a CONST. Failure to do so can result
2381 in infinite recursion with simplify_binary_operation
2382 when it calls us to simplify CONST operations. */
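/* E.g. simplifying (symbol_ref S) plus (const_int 4) can come back
   as (const (plus (symbol_ref S) (const_int 4))), which is just the
   same pair wrapped up again. */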
2383 if (tem
2384 && ! (GET_CODE (tem) == CONST
2385 && GET_CODE (XEXP (tem, 0)) == ncode
2386 && XEXP (XEXP (tem, 0), 0) == lhs
2387 && XEXP (XEXP (tem, 0), 1) == rhs)
2388 /* Don't allow -x + -1 -> ~x simplifications in the
2389 first pass. This allows us the chance to combine
2390 the -1 with other constants. */
2391 && ! (first
2392 && GET_CODE (tem) == NOT
2393 && XEXP (tem, 0) == rhs))
2395 lneg &= rneg;
2396 if (GET_CODE (tem) == NEG)
2397 tem = XEXP (tem, 0), lneg = !lneg;
2398 if (GET_CODE (tem) == CONST_INT && lneg)
2399 tem = neg_const_int (mode, tem), lneg = 0;
2401 ops[i].op = tem;
2402 ops[i].neg = lneg;
2403 ops[j].op = NULL_RTX;
2404 changed = 1;
2409 first = 0;
2411 while (changed);
2413 /* Pack all the operands to the lower-numbered entries. */
2414 for (i = 0, j = 0; j < n_ops; j++)
2415 if (ops[j].op)
2416 ops[i++] = ops[j];
2417 n_ops = i;
2419 /* Sort the operations based on swap_commutative_operands_p. */
2420 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2422 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2423 if (n_ops == 2
2424 && GET_CODE (ops[1].op) == CONST_INT
2425 && CONSTANT_P (ops[0].op)
2426 && ops[0].neg)
2427 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2429 /* We suppressed creation of trivial CONST expressions in the
2430 combination loop to avoid recursion. Create one manually now.
2431 The combination loop should have ensured that there is exactly
2432 one CONST_INT, and the sort will have ensured that it is last
2433 in the array and that any other constant will be next-to-last. */
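/* E.g. if the two trailing entries are (symbol_ref S) and
   (const_int 4), plus_constant combines them into
   (const (plus (symbol_ref S) (const_int 4))). */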
2435 if (n_ops > 1
2436 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2437 && CONSTANT_P (ops[n_ops - 2].op))
2439 rtx value = ops[n_ops - 1].op;
2440 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2441 value = neg_const_int (mode, value);
2442 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2443 n_ops--;
2446 /* Count the number of CONSTs that we generated. */
2447 n_consts = 0;
2448 for (i = 0; i < n_ops; i++)
2449 if (GET_CODE (ops[i].op) == CONST)
2450 n_consts++;
2452 /* Give up if we didn't reduce the number of operands we had. Make
2453 sure we count a CONST as two operands. If we have the same
2454 number of operands, but have made more CONSTs than before, this
2455 is also an improvement, so accept it. */
2456 if (!force
2457 && (n_ops + n_consts > input_ops
2458 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2459 return NULL_RTX;
2461 /* Put a non-negated operand first. If there aren't any, make all
2462 operands positive and negate the whole thing later. */
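/* E.g. the list {-a, -b} becomes {a, b} with NEGATE set, so the
   result is built as (neg (plus a b)) rather than starting the
   chain with a negated operand. */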
2464 negate = 0;
2465 for (i = 0; i < n_ops && ops[i].neg; i++)
2466 continue;
2467 if (i == n_ops)
2469 for (i = 0; i < n_ops; i++)
2470 ops[i].neg = 0;
2471 negate = 1;
2473 else if (i != 0)
2475 tem = ops[0].op;
2476 ops[0] = ops[i];
2477 ops[i].op = tem;
2478 ops[i].neg = 1;
2481 /* Now make the result by performing the requested operations. */
2482 result = ops[0].op;
2483 for (i = 1; i < n_ops; i++)
2484 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2485 mode, result, ops[i].op);
2487 return negate ? gen_rtx_NEG (mode, result) : result;
2490 /* Like simplify_binary_operation except used for relational operators.
2491 MODE is the mode of the operands, not that of the result. If MODE
2492 is VOIDmode, both operands must also be VOIDmode and we compare the
2493 operands in "infinite precision".
2495 If no simplification is possible, this function returns zero. Otherwise,
2496 it returns either const_true_rtx or const0_rtx. */
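/* E.g. simplify_relational_operation (EQ, SImode, const1_rtx,
   const1_rtx) yields const_true_rtx, while comparing two distinct
   pseudo registers yields 0, meaning no simplification was found. */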
2499 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2500 rtx op0, rtx op1)
2502 int equal, op0lt, op0ltu, op1lt, op1ltu;
2503 rtx tem;
2504 rtx trueop0;
2505 rtx trueop1;
2507 if (mode == VOIDmode
2508 && (GET_MODE (op0) != VOIDmode
2509 || GET_MODE (op1) != VOIDmode))
2510 abort ();
2512 /* If op0 is a compare, extract the comparison arguments from it. */
2513 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2514 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2516 trueop0 = avoid_constant_pool_reference (op0);
2517 trueop1 = avoid_constant_pool_reference (op1);
2519 /* We can't simplify MODE_CC values since we don't know what the
2520 actual comparison is. */
2521 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2522 return 0;
2524 /* Make sure the constant is second. */
2525 if (swap_commutative_operands_p (trueop0, trueop1))
2527 tem = op0, op0 = op1, op1 = tem;
2528 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2529 code = swap_condition (code);
2532 /* For integer comparisons of A and B maybe we can simplify A - B and can
2533 then simplify a comparison of that with zero. If A and B are both either
2534 a register or a CONST_INT, this can't help; testing for these cases will
2535 prevent infinite recursion here and speed things up.
2537 If CODE is an unsigned comparison, then we can never do this optimization,
2538 because it gives an incorrect result if the subtraction wraps around zero.
2539 ANSI C defines unsigned operations such that they never overflow, and
2540 thus such cases cannot be ignored. */
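/* E.g. (gtu 1 -1) is false, since ~0 is the largest unsigned value,
   but 1 - (-1) == 2 and (gt 2 0) is true; hence the unsigned codes
   are excluded from the transformation below. */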
2542 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2543 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2544 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2545 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2546 && code != GTU && code != GEU && code != LTU && code != LEU)
2547 return simplify_relational_operation (signed_condition (code),
2548 mode, tem, const0_rtx);
2550 if (flag_unsafe_math_optimizations && code == ORDERED)
2551 return const_true_rtx;
2553 if (flag_unsafe_math_optimizations && code == UNORDERED)
2554 return const0_rtx;
2556 /* For modes without NaNs, if the two operands are equal, we know the
2557 result except if they have side-effects. */
2558 if (! HONOR_NANS (GET_MODE (trueop0))
2559 && rtx_equal_p (trueop0, trueop1)
2560 && ! side_effects_p (trueop0))
2561 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2563 /* If the operands are floating-point constants, see if we can fold
2564 the result. */
2565 else if (GET_CODE (trueop0) == CONST_DOUBLE
2566 && GET_CODE (trueop1) == CONST_DOUBLE
2567 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2569 REAL_VALUE_TYPE d0, d1;
2571 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2572 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2574 /* Comparisons are unordered iff at least one of the values is NaN. */
2575 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2576 switch (code)
2578 case UNEQ:
2579 case UNLT:
2580 case UNGT:
2581 case UNLE:
2582 case UNGE:
2583 case NE:
2584 case UNORDERED:
2585 return const_true_rtx;
2586 case EQ:
2587 case LT:
2588 case GT:
2589 case LE:
2590 case GE:
2591 case LTGT:
2592 case ORDERED:
2593 return const0_rtx;
2594 default:
2595 return 0;
2598 equal = REAL_VALUES_EQUAL (d0, d1);
2599 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2600 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2603 /* Otherwise, see if the operands are both integers. */
2604 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2605 && (GET_CODE (trueop0) == CONST_DOUBLE
2606 || GET_CODE (trueop0) == CONST_INT)
2607 && (GET_CODE (trueop1) == CONST_DOUBLE
2608 || GET_CODE (trueop1) == CONST_INT))
2610 int width = GET_MODE_BITSIZE (mode);
2611 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2612 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2614 /* Get the two words comprising each integer constant. */
2615 if (GET_CODE (trueop0) == CONST_DOUBLE)
2617 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2618 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2620 else
2622 l0u = l0s = INTVAL (trueop0);
2623 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2626 if (GET_CODE (trueop1) == CONST_DOUBLE)
2628 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2629 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2631 else
2633 l1u = l1s = INTVAL (trueop1);
2634 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2637 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2638 we have to sign or zero-extend the values. */
2639 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2641 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2642 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2644 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2645 l0s |= ((HOST_WIDE_INT) (-1) << width);
2647 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2648 l1s |= ((HOST_WIDE_INT) (-1) << width);
2650 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2651 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2653 equal = (h0u == h1u && l0u == l1u);
2654 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2655 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2656 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2657 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2660 /* Otherwise, there are some code-specific tests we can make. */
2661 else
2663 switch (code)
2665 case EQ:
2666 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2667 return const0_rtx;
2668 break;
2670 case NE:
2671 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2672 return const_true_rtx;
2673 break;
2675 case GEU:
2676 /* Unsigned values are never negative. */
2677 if (trueop1 == const0_rtx)
2678 return const_true_rtx;
2679 break;
2681 case LTU:
2682 if (trueop1 == const0_rtx)
2683 return const0_rtx;
2684 break;
2686 case LEU:
2687 /* Unsigned values are never greater than the largest
2688 unsigned value. */
2689 if (GET_CODE (trueop1) == CONST_INT
2690 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2691 && INTEGRAL_MODE_P (mode))
2692 return const_true_rtx;
2693 break;
2695 case GTU:
2696 if (GET_CODE (trueop1) == CONST_INT
2697 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2698 && INTEGRAL_MODE_P (mode))
2699 return const0_rtx;
2700 break;
2702 case LT:
2703 /* Optimize abs(x) < 0.0. */
2704 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2706 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2707 : trueop0;
2708 if (GET_CODE (tem) == ABS)
2709 return const0_rtx;
2711 break;
2713 case GE:
2714 /* Optimize abs(x) >= 0.0. */
2715 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2717 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2718 : trueop0;
2719 if (GET_CODE (tem) == ABS)
2720 return const_true_rtx;
2722 break;
2724 case UNGE:
2725 /* Optimize ! (abs(x) < 0.0). */
2726 if (trueop1 == CONST0_RTX (mode))
2728 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2729 : trueop0;
2730 if (GET_CODE (tem) == ABS)
2731 return const_true_rtx;
2733 break;
2735 default:
2736 break;
2739 return 0;
2742 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2743 as appropriate. */
2744 switch (code)
2746 case EQ:
2747 case UNEQ:
2748 return equal ? const_true_rtx : const0_rtx;
2749 case NE:
2750 case LTGT:
2751 return ! equal ? const_true_rtx : const0_rtx;
2752 case LT:
2753 case UNLT:
2754 return op0lt ? const_true_rtx : const0_rtx;
2755 case GT:
2756 case UNGT:
2757 return op1lt ? const_true_rtx : const0_rtx;
2758 case LTU:
2759 return op0ltu ? const_true_rtx : const0_rtx;
2760 case GTU:
2761 return op1ltu ? const_true_rtx : const0_rtx;
2762 case LE:
2763 case UNLE:
2764 return equal || op0lt ? const_true_rtx : const0_rtx;
2765 case GE:
2766 case UNGE:
2767 return equal || op1lt ? const_true_rtx : const0_rtx;
2768 case LEU:
2769 return equal || op0ltu ? const_true_rtx : const0_rtx;
2770 case GEU:
2771 return equal || op1ltu ? const_true_rtx : const0_rtx;
2772 case ORDERED:
2773 return const_true_rtx;
2774 case UNORDERED:
2775 return const0_rtx;
2776 default:
2777 abort ();
2781 /* Simplify CODE, an operation with result mode MODE and three operands,
2782 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2783 a constant. Return 0 if no simplification is possible. */
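/* Two illustrative inputs: (if_then_else (const_int 1) A B) folds
   to A, and a ZERO_EXTRACT whose operands are all CONST_INTs folds
   to the extracted constant, as handled case by case below. */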
2786 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2787 enum machine_mode op0_mode, rtx op0, rtx op1,
2788 rtx op2)
2790 unsigned int width = GET_MODE_BITSIZE (mode);
2792 /* VOIDmode means "infinite" precision. */
2793 if (width == 0)
2794 width = HOST_BITS_PER_WIDE_INT;
2796 switch (code)
2798 case SIGN_EXTRACT:
2799 case ZERO_EXTRACT:
2800 if (GET_CODE (op0) == CONST_INT
2801 && GET_CODE (op1) == CONST_INT
2802 && GET_CODE (op2) == CONST_INT
2803 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2804 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2806 /* Extracting a bit-field from a constant */
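/* E.g., assuming !BITS_BIG_ENDIAN, extracting 4 bits at position 4
   from (const_int 0xab) shifts val right by 4 and masks, yielding
   (const_int 0xa). */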
2807 HOST_WIDE_INT val = INTVAL (op0);
2809 if (BITS_BIG_ENDIAN)
2810 val >>= (GET_MODE_BITSIZE (op0_mode)
2811 - INTVAL (op2) - INTVAL (op1));
2812 else
2813 val >>= INTVAL (op2);
2815 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2817 /* First zero-extend. */
2818 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2819 /* If desired, propagate sign bit. */
2820 if (code == SIGN_EXTRACT
2821 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2822 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2825 /* Clear the bits that don't belong in our mode,
2826 unless they and our sign bit are all one.
2827 So we get either a reasonable negative value or a reasonable
2828 unsigned value for this mode. */
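/* E.g. with width == 8, a val of -6 (whose high bits are all copies
   of the sign bit) is kept as is, while 0x1f4 fails the test and is
   masked down to 0xf4. */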
2829 if (width < HOST_BITS_PER_WIDE_INT
2830 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2831 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2832 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2834 return GEN_INT (val);
2836 break;
2838 case IF_THEN_ELSE:
2839 if (GET_CODE (op0) == CONST_INT)
2840 return op0 != const0_rtx ? op1 : op2;
2842 /* Convert c ? a : a into "a". */
2843 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2844 return op1;
2846 /* Convert a != b ? a : b into "a". */
2847 if (GET_CODE (op0) == NE
2848 && ! side_effects_p (op0)
2849 && ! HONOR_NANS (mode)
2850 && ! HONOR_SIGNED_ZEROS (mode)
2851 && ((rtx_equal_p (XEXP (op0, 0), op1)
2852 && rtx_equal_p (XEXP (op0, 1), op2))
2853 || (rtx_equal_p (XEXP (op0, 0), op2)
2854 && rtx_equal_p (XEXP (op0, 1), op1))))
2855 return op1;
2857 /* Convert a == b ? a : b into "b". */
2858 if (GET_CODE (op0) == EQ
2859 && ! side_effects_p (op0)
2860 && ! HONOR_NANS (mode)
2861 && ! HONOR_SIGNED_ZEROS (mode)
2862 && ((rtx_equal_p (XEXP (op0, 0), op1)
2863 && rtx_equal_p (XEXP (op0, 1), op2))
2864 || (rtx_equal_p (XEXP (op0, 0), op2)
2865 && rtx_equal_p (XEXP (op0, 1), op1))))
2866 return op2;
2868 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2870 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2871 ? GET_MODE (XEXP (op0, 1))
2872 : GET_MODE (XEXP (op0, 0)));
2873 rtx temp;
2874 if (cmp_mode == VOIDmode)
2875 cmp_mode = op0_mode;
2876 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2877 XEXP (op0, 0), XEXP (op0, 1));
2879 /* See if any simplifications were possible. */
2880 if (temp == const0_rtx)
2881 return op2;
2882 else if (temp == const_true_rtx)
2883 return op1;
2884 else if (temp)
2885 abort ();
2887 /* Look for happy constants in op1 and op2. */
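/* E.g. with STORE_FLAG_VALUE == 1, (if_then_else (eq x y)
   (const_int 1) (const_int 0)) becomes (eq x y) itself, while the
   0/1 case becomes the reversed comparison (ne x y). */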
2888 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2890 HOST_WIDE_INT t = INTVAL (op1);
2891 HOST_WIDE_INT f = INTVAL (op2);
2893 if (t == STORE_FLAG_VALUE && f == 0)
2894 code = GET_CODE (op0);
2895 else if (t == 0 && f == STORE_FLAG_VALUE)
2897 enum rtx_code tmp;
2898 tmp = reversed_comparison_code (op0, NULL_RTX);
2899 if (tmp == UNKNOWN)
2900 break;
2901 code = tmp;
2903 else
2904 break;
2906 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2909 break;
2911 case VEC_MERGE:
2912 if (GET_MODE (op0) != mode
2913 || GET_MODE (op1) != mode
2914 || !VECTOR_MODE_P (mode))
2915 abort ();
2916 op2 = avoid_constant_pool_reference (op2);
2917 if (GET_CODE (op2) == CONST_INT)
2919 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2920 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2921 int mask = (1 << n_elts) - 1;
2923 if (!(INTVAL (op2) & mask))
2924 return op1;
2925 if ((INTVAL (op2) & mask) == mask)
2926 return op0;
2928 op0 = avoid_constant_pool_reference (op0);
2929 op1 = avoid_constant_pool_reference (op1);
2930 if (GET_CODE (op0) == CONST_VECTOR
2931 && GET_CODE (op1) == CONST_VECTOR)
2933 rtvec v = rtvec_alloc (n_elts);
2934 unsigned int i;
2936 for (i = 0; i < n_elts; i++)
2937 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2938 ? CONST_VECTOR_ELT (op0, i)
2939 : CONST_VECTOR_ELT (op1, i));
2940 return gen_rtx_CONST_VECTOR (mode, v);
2943 break;
2945 default:
2946 abort ();
2949 return 0;
2952 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2953 Return 0 if no simplification is possible. */
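/* Two representative cases handled below: a SUBREG of a constant
   folds to the selected piece of the constant, and a SUBREG of a
   SUBREG, e.g. (subreg:SI (subreg:DI (reg:TI r) 8) 0), collapses
   into a single SUBREG of the innermost register. */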
2955 simplify_subreg (enum machine_mode outermode, rtx op,
2956 enum machine_mode innermode, unsigned int byte)
2958 /* Little bit of sanity checking. */
2959 if (innermode == VOIDmode || outermode == VOIDmode
2960 || innermode == BLKmode || outermode == BLKmode)
2961 abort ();
2963 if (GET_MODE (op) != innermode
2964 && GET_MODE (op) != VOIDmode)
2965 abort ();
2967 if (byte % GET_MODE_SIZE (outermode)
2968 || byte >= GET_MODE_SIZE (innermode))
2969 abort ();
2971 if (outermode == innermode && !byte)
2972 return op;
2974 /* Simplify subregs of vector constants. */
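/* E.g. (subreg:SI (const_vector:V4SI [a b c d]) 4) selects the
   element at index 4 / elt_size == 1, i.e. b. */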
2975 if (GET_CODE (op) == CONST_VECTOR)
2977 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2978 const unsigned int offset = byte / elt_size;
2979 rtx elt;
2981 if (GET_MODE_INNER (innermode) == outermode)
2983 elt = CONST_VECTOR_ELT (op, offset);
2985 /* ?? We probably don't need this copy_rtx because constants
2986 can be shared. ?? */
2988 return copy_rtx (elt);
2990 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2991 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2993 return (gen_rtx_CONST_VECTOR
2994 (outermode,
2995 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2996 &CONST_VECTOR_ELT (op, offset))));
2998 else if (GET_MODE_CLASS (outermode) == MODE_INT
2999 && (GET_MODE_SIZE (outermode) % elt_size == 0))
3001 /* This happens when the target register size is smaller than
3002 the vector mode, and we synthesize operations with vectors
3003 of elements that are smaller than the register size. */
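/* Worked example: an SImode subreg of a V4QImode constant
   {1, 2, 3, 4} packs four 8-bit elements into one integer, giving
   0x01020304 on a big-endian target and 0x04030201 on a
   little-endian one, per the i/step choice below. */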
3004 HOST_WIDE_INT sum = 0, high = 0;
3005 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
3006 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
3007 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
3008 int shift = BITS_PER_UNIT * elt_size;
3009 unsigned HOST_WIDE_INT unit_mask;
3011 unit_mask = (unsigned HOST_WIDE_INT) -1
3012 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
3014 for (; n_elts--; i += step)
3016 elt = CONST_VECTOR_ELT (op, i);
3017 if (GET_CODE (elt) == CONST_DOUBLE
3018 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
3020 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
3021 elt);
3022 if (! elt)
3023 return NULL_RTX;
3025 if (GET_CODE (elt) != CONST_INT)
3026 return NULL_RTX;
3027 /* Avoid overflow. */
3028 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
3029 return NULL_RTX;
3030 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
3031 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
3033 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
3034 return GEN_INT (trunc_int_for_mode (sum, outermode));
3035 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
3036 return immed_double_const (sum, high, outermode);
3037 else
3038 return NULL_RTX;
3040 else if (GET_MODE_CLASS (outermode) == MODE_INT
3041 && (elt_size % GET_MODE_SIZE (outermode) == 0))
3043 enum machine_mode new_mode
3044 = int_mode_for_mode (GET_MODE_INNER (innermode));
3045 int subbyte = byte % elt_size;
3047 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
3048 if (! op)
3049 return NULL_RTX;
3050 return simplify_subreg (outermode, op, new_mode, subbyte);
3052 else if (GET_MODE_CLASS (outermode) == MODE_INT)
3053 /* This shouldn't happen, but let's not do anything stupid. */
3054 return NULL_RTX;
3057 /* Attempt to simplify constant to non-SUBREG expression. */
3058 if (CONSTANT_P (op))
3060 int offset, part;
3061 unsigned HOST_WIDE_INT val = 0;
3063 if (VECTOR_MODE_P (outermode))
3065 /* Construct a CONST_VECTOR from individual subregs. */
3066 enum machine_mode submode = GET_MODE_INNER (outermode);
3067 int subsize = GET_MODE_UNIT_SIZE (outermode);
3068 int i, elts = GET_MODE_NUNITS (outermode);
3069 rtvec v = rtvec_alloc (elts);
3070 rtx elt;
3072 for (i = 0; i < elts; i++, byte += subsize)
3074 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
3075 /* ??? It would be nice if we could actually make such subregs
3076 on targets that allow such relocations. */
3077 if (byte >= GET_MODE_SIZE (innermode))
3078 elt = CONST0_RTX (submode);
3079 else
3080 elt = simplify_subreg (submode, op, innermode, byte);
3081 if (! elt)
3082 return NULL_RTX;
3083 RTVEC_ELT (v, i) = elt;
3085 return gen_rtx_CONST_VECTOR (outermode, v);
3088 /* ??? This code is partly redundant with code below, but can handle
3089 the subregs of floats and similar corner cases.
3090 Later we should move all simplification code here and rewrite
3091 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
3092 using SIMPLIFY_SUBREG. */
3093 if (subreg_lowpart_offset (outermode, innermode) == byte
3094 && GET_CODE (op) != CONST_VECTOR)
3096 rtx new = gen_lowpart_if_possible (outermode, op);
3097 if (new)
3098 return new;
3101 /* A similar comment to the one above applies here. */
3102 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
3103 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
3104 && GET_MODE_CLASS (outermode) == MODE_INT)
3106 rtx new = constant_subword (op,
3107 (byte / UNITS_PER_WORD),
3108 innermode);
3109 if (new)
3110 return new;
3113 if (GET_MODE_CLASS (outermode) != MODE_INT
3114 && GET_MODE_CLASS (outermode) != MODE_CC)
3116 enum machine_mode new_mode = int_mode_for_mode (outermode);
3118 if (new_mode != innermode || byte != 0)
3120 op = simplify_subreg (new_mode, op, innermode, byte);
3121 if (! op)
3122 return NULL_RTX;
3123 return simplify_subreg (outermode, op, new_mode, 0);
3127 offset = byte * BITS_PER_UNIT;
3128 switch (GET_CODE (op))
3130 case CONST_DOUBLE:
3131 if (GET_MODE (op) != VOIDmode)
3132 break;
3134 /* We can't handle this case yet. */
3135 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
3136 return NULL_RTX;
3138 part = offset >= HOST_BITS_PER_WIDE_INT;
3139 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
3140 && BYTES_BIG_ENDIAN)
3141 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
3142 && WORDS_BIG_ENDIAN))
3143 part = !part;
3144 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
3145 offset %= HOST_BITS_PER_WIDE_INT;
3147 /* We've already picked the word we want from a double, so
3148 pretend this is actually an integer. */
3149 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
3151 /* FALLTHROUGH */
3152 case CONST_INT:
3153 if (GET_CODE (op) == CONST_INT)
3154 val = INTVAL (op);
3156 /* We don't handle synthesizing of non-integral constants yet. */
3157 if (GET_MODE_CLASS (outermode) != MODE_INT)
3158 return NULL_RTX;
3160 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
3162 if (WORDS_BIG_ENDIAN)
3163 offset = (GET_MODE_BITSIZE (innermode)
3164 - GET_MODE_BITSIZE (outermode) - offset);
3165 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
3166 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
3167 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
3168 - 2 * (offset % BITS_PER_WORD));
3171 if (offset >= HOST_BITS_PER_WIDE_INT)
3172 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
3173 else
3175 val >>= offset;
3176 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
3177 val = trunc_int_for_mode (val, outermode);
3178 return GEN_INT (val);
3180 default:
3181 break;
3185 /* Changing mode twice with SUBREG => just change it once,
3186 or not at all if changing back to op's starting mode. */
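/* E.g. (subreg:QI (subreg:HI (reg:SI r) 0) 0) becomes
   (subreg:QI (reg:SI r) 0), and (subreg:SI (subreg:SI (reg:SI r) 0) 0)
   collapses all the way back to (reg:SI r). */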
3187 if (GET_CODE (op) == SUBREG)
3189 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3190 int final_offset = byte + SUBREG_BYTE (op);
3191 rtx new;
3193 if (outermode == innermostmode
3194 && byte == 0 && SUBREG_BYTE (op) == 0)
3195 return SUBREG_REG (op);
3197 /* The SUBREG_BYTE represents the offset, as if the value were stored
3198 in memory. An irritating exception is the paradoxical subreg, where
3199 we define SUBREG_BYTE to be 0. On big endian machines, this
3200 value should be negative. For a moment, undo this exception. */
3201 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3203 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3204 if (WORDS_BIG_ENDIAN)
3205 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3206 if (BYTES_BIG_ENDIAN)
3207 final_offset += difference % UNITS_PER_WORD;
3209 if (SUBREG_BYTE (op) == 0
3210 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3212 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3213 if (WORDS_BIG_ENDIAN)
3214 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3215 if (BYTES_BIG_ENDIAN)
3216 final_offset += difference % UNITS_PER_WORD;
3219 /* See whether resulting subreg will be paradoxical. */
3220 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3222 /* In nonparadoxical subregs we can't handle negative offsets. */
3223 if (final_offset < 0)
3224 return NULL_RTX;
3225 /* Bail out in case resulting subreg would be incorrect. */
3226 if (final_offset % GET_MODE_SIZE (outermode)
3227 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3228 return NULL_RTX;
3230 else
3232 int offset = 0;
3233 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3235 /* In a paradoxical subreg, see if we are still looking at the lower part.
3236 If so, our SUBREG_BYTE will be 0. */
3237 if (WORDS_BIG_ENDIAN)
3238 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3239 if (BYTES_BIG_ENDIAN)
3240 offset += difference % UNITS_PER_WORD;
3241 if (offset == final_offset)
3242 final_offset = 0;
3243 else
3244 return NULL_RTX;
3247 /* Recurse for further possible simplifications. */
3248 new = simplify_subreg (outermode, SUBREG_REG (op),
3249 GET_MODE (SUBREG_REG (op)),
3250 final_offset);
3251 if (new)
3252 return new;
3253 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3256 /* SUBREG of a hard register => just change the register number
3257 and/or mode. If the hard register is not valid in that mode,
3258 suppress this simplification. If the hard register is the stack,
3259 frame, or argument pointer, leave this as a SUBREG. */
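/* For instance, on a little-endian target (subreg:SI (reg:DI 0) 4)
   can simply become (reg:SI 1) when hard register 1 is valid in
   SImode; the checks below guard the cases where renumbering would
   be wrong or unrepresentable. */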
3261 if (REG_P (op)
3262 && (! REG_FUNCTION_VALUE_P (op)
3263 || ! rtx_equal_function_value_matters)
3264 && REGNO (op) < FIRST_PSEUDO_REGISTER
3265 #ifdef CANNOT_CHANGE_MODE_CLASS
3266 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3267 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3268 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3269 #endif
3270 && ((reload_completed && !frame_pointer_needed)
3271 || (REGNO (op) != FRAME_POINTER_REGNUM
3272 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3273 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3274 #endif
3276 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3277 && REGNO (op) != ARG_POINTER_REGNUM
3278 #endif
3279 && REGNO (op) != STACK_POINTER_REGNUM
3280 && subreg_offset_representable_p (REGNO (op), innermode,
3281 byte, outermode))
3283 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3284 int final_regno = subreg_hard_regno (tem, 0);
3286 /* ??? We do allow it if the current REG is not valid for
3287 its mode. This is a kludge to work around how float/complex
3288 arguments are passed on 32-bit SPARC and should be fixed. */
3289 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3290 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3292 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3294 /* Propagate original regno. We don't have any way to specify
3295 the offset inside original regno, so do so only for lowpart.
3296 The information is used only by alias analysis, which cannot
3297 grok partial registers anyway. */
3299 if (subreg_lowpart_offset (outermode, innermode) == byte)
3300 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3301 return x;
3305 /* If we have a SUBREG of a register that we are replacing and we are
3306 replacing it with a MEM, make a new MEM and try replacing the
3307 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3308 or if we would be widening it. */
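/* E.g. (subreg:SI (mem:DI addr) 4) becomes (mem:SI (plus addr 4)),
   assuming the address is not mode-dependent; adjust_address_nv
   does the offset arithmetic. */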
3310 if (GET_CODE (op) == MEM
3311 && ! mode_dependent_address_p (XEXP (op, 0))
3312 /* Allow splitting of volatile memory references in case we don't
3313 have an instruction to move the whole thing. */
3314 && (! MEM_VOLATILE_P (op)
3315 || ! have_insn_for (SET, innermode))
3316 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3317 return adjust_address_nv (op, outermode, byte);
3319 /* Handle complex values represented as CONCAT
3320 of real and imaginary part. */
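/* E.g. for a complex float (concat:SC r i) with 4-byte parts, a
   byte offset of 4 lands in the second half, so
   (subreg:SF (concat:SC r i) 4) simplifies to i. */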
3321 if (GET_CODE (op) == CONCAT)
3323 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3324 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3325 unsigned int final_offset;
3326 rtx res;
3328 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3329 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3330 if (res)
3331 return res;
3332 /* We can at least simplify it by referring directly to the relevant part. */
3333 return gen_rtx_SUBREG (outermode, part, final_offset);
3336 return NULL_RTX;
3338 /* Make a SUBREG operation or equivalent if it folds. */
3341 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3342 enum machine_mode innermode, unsigned int byte)
3344 rtx new;
3345 /* Little bit of sanity checking. */
3346 if (innermode == VOIDmode || outermode == VOIDmode
3347 || innermode == BLKmode || outermode == BLKmode)
3348 abort ();
3350 if (GET_MODE (op) != innermode
3351 && GET_MODE (op) != VOIDmode)
3352 abort ();
3354 if (byte % GET_MODE_SIZE (outermode)
3355 || byte >= GET_MODE_SIZE (innermode))
3356 abort ();
3358 if (GET_CODE (op) == QUEUED)
3359 return NULL_RTX;
3361 new = simplify_subreg (outermode, op, innermode, byte);
3362 if (new)
3363 return new;
3365 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3366 return NULL_RTX;
3368 return gen_rtx_SUBREG (outermode, op, byte);
3370 /* Simplify X, an rtx expression.
3372 Return the simplified expression or NULL if no simplifications
3373 were possible.
3375 This is the preferred entry point into the simplification routines;
3376 however, we still allow passes to call the more specific routines.
3378 Right now GCC has three (yes, three) major bodies of RTL simplification
3379 code that need to be unified.
3381 1. fold_rtx in cse.c. This code uses various CSE specific
3382 information to aid in RTL simplification.
3384 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3385 it uses combine specific information to aid in RTL
3386 simplification.
3388 3. The routines in this file.
3391 Long term we want to only have one body of simplification code; to
3392 get to that state I recommend the following steps:
3394 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3395 which are not pass dependent state into these routines.
3397 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3398 use this routine whenever possible.
3400 3. Allow for pass dependent state to be provided to these
3401 routines and add simplifications based on the pass dependent
3402 state. Remove code from cse.c & combine.c that becomes
3403 redundant/dead.
3405 It will take time, but ultimately the compiler will be easier to
3406 maintain and improve. It's totally silly that when we add a
3407 simplification it needs to be added to 4 places (3 for RTL
3408 simplification and 1 for tree simplification). */
3411 simplify_rtx (rtx x)
3413 enum rtx_code code = GET_CODE (x);
3414 enum machine_mode mode = GET_MODE (x);
3415 rtx temp;
3417 switch (GET_RTX_CLASS (code))
3419 case '1':
3420 return simplify_unary_operation (code, mode,
3421 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3422 case 'c':
3423 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3424 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3426 /* Fall through. */
3428 case '2':
3429 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3431 case '3':
3432 case 'b':
3433 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3434 XEXP (x, 0), XEXP (x, 1),
3435 XEXP (x, 2));
3437 case '<':
3438 temp = simplify_relational_operation (code,
3439 ((GET_MODE (XEXP (x, 0))
3440 != VOIDmode)
3441 ? GET_MODE (XEXP (x, 0))
3442 : GET_MODE (XEXP (x, 1))),
3443 XEXP (x, 0), XEXP (x, 1));
3444 #ifdef FLOAT_STORE_FLAG_VALUE
3445 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3447 if (temp == const0_rtx)
3448 temp = CONST0_RTX (mode);
3449 else
3450 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3451 mode);
3453 #endif
3454 return temp;
3456 case 'x':
3457 if (code == SUBREG)
3458 return simplify_gen_subreg (mode, SUBREG_REG (x),
3459 GET_MODE (SUBREG_REG (x)),
3460 SUBREG_BYTE (x));
3461 if (code == CONSTANT_P_RTX)
3463 if (CONSTANT_P (XEXP (x, 0)))
3464 return const1_rtx;
3466 break;
3468 case 'o':
3469 if (code == LO_SUM)
3471 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3472 if (GET_CODE (XEXP (x, 0)) == HIGH
3473 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3474 return XEXP (x, 1);
3476 break;
3478 default:
3479 break;
3481 return NULL;