/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
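
/* Illustrative sketch, added for exposition; not part of the original
   file.  A double-word value is kept as a (low, high) pair, and when we
   only have a single word the macro above manufactures the high part
   from the sign of the low part.  */

static void hwi_sign_extend_example (void) ATTRIBUTE_UNUSED;
static void
hwi_sign_extend_example (void)
{
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) -5;
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);

  /* LOW looks negative when viewed as signed, so HIGH is all ones.  */
  if (high != (HOST_WIDE_INT) -1)
    abort ();
}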
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
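
/* Illustrative sketch, added for exposition; not part of the original
   file.  It shows why the truncation above matters: in QImode the most
   negative value is -128, and negating it gives +128, which does not
   fit, so gen_int_mode wraps it back to -128 modulo the mode mask.  */

static void neg_const_int_example (void) ATTRIBUTE_UNUSED;
static void
neg_const_int_example (void)
{
  rtx x = gen_int_mode (-128, QImode);
  rtx y = neg_const_int (QImode, x);

  /* 128 truncated to 8 bits is -128 again.  */
  if (INTVAL (y) != -128)
    abort ();
}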
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
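
/* Illustrative sketch, added for exposition; not part of the original
   file.  The register number FIRST_PSEUDO_REGISTER is used purely to
   manufacture a pseudo for the example.  Adding the constant zero to a
   register folds away entirely.  */

static void simplify_gen_binary_example (void) ATTRIBUTE_UNUSED;
static void
simplify_gen_binary_example (void)
{
  rtx reg = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx sum = simplify_gen_binary (PLUS, SImode, reg, const0_rtx);

  /* x + 0 simplifies to x for integer modes.  */
  if (sum != reg)
    abort ();
}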
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
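
/* Illustrative sketch, added for exposition; not part of the original
   file.  It assumes it runs late enough in compilation that a constant
   pool exists for force_const_mem to use.  Anything that is not a
   pool reference comes back unchanged; a MEM whose address is a
   constant-pool SYMBOL_REF is looked through to the stored constant.  */

static void avoid_constant_pool_reference_example (void) ATTRIBUTE_UNUSED;
static void
avoid_constant_pool_reference_example (void)
{
  rtx reg = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx mem = force_const_mem (SImode, GEN_INT (1000));

  /* A plain REG is not a pool reference, so it is returned as-is.  */
  if (avoid_constant_pool_reference (reg) != reg)
    abort ();

  /* The pool reference is looked through to the constant it names.  */
  if (! rtx_equal_p (avoid_constant_pool_reference (mem), GEN_INT (1000)))
    abort ();
}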
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
        {
#ifdef FLOAT_STORE_FLAG_VALUE
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              REAL_VALUE_TYPE val;
              if (tem == const0_rtx)
                return CONST0_RTX (mode);
              if (tem != const_true_rtx)
                abort ();
              val = FLOAT_STORE_FLAG_VALUE (mode);
              return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
            }
#endif
          return tem;
        }
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;
          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
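
/* Illustrative sketch, added for exposition; not part of the original
   file.  Testing a comparison result for "not equal to zero" in the
   same mode collapses to the comparison itself.  The register numbers
   are manufactured for the example.  */

static void simplify_gen_relational_example (void) ATTRIBUTE_UNUSED;
static void
simplify_gen_relational_example (void)
{
  rtx a = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx b = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER + 1);
  rtx cmp = gen_rtx_EQ (SImode, a, b);

  /* (ne (eq a b) 0) in the same mode is just (eq a b).  */
  if (simplify_gen_relational (NE, SImode, VOIDmode, cmp, const0_rtx) != cmp)
    abort ();
}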
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case 'o':
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
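
/* Illustrative sketch, added for exposition; not part of the original
   file.  Substituting a constant for a register lets the rebuilt
   expression fold: replacing the (manufactured) pseudo in (plus reg 1)
   with 2 yields the constant 3.  */

static void simplify_replace_rtx_example (void) ATTRIBUTE_UNUSED;
static void
simplify_replace_rtx_example (void)
{
  rtx reg = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx expr = gen_rtx_PLUS (SImode, reg, const1_rtx);
  rtx folded = simplify_replace_rtx (expr, reg, GEN_INT (2));

  /* (plus 2 1) constant-folds to 3.  */
  if (folded != GEN_INT (3))
    abort ();
}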
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;

        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}
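
/* Illustrative sketch, added for exposition; not part of the original
   file.  The FFS case above isolates the lowest set bit with
   ARG0 & -ARG0 and takes its log2; for 12 (binary 1100) the lowest set
   bit is bit 2, so (ffs 12) folds to 3.  */

static void simplify_unary_operation_example (void) ATTRIBUTE_UNUSED;
static void
simplify_unary_operation_example (void)
{
  rtx r = simplify_unary_operation (FFS, SImode, GEN_INT (12), SImode);

  if (r != GEN_INT (3))
    abort ();
}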
/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
         || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
        return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
                                         XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
        return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
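
/* Illustrative sketch, added for exposition; not part of the original
   file.  The register number is manufactured for the example.  The
   first rule above refolds the two constants of ((x + 1) + 2) into
   (plus x 3).  */

static void simplify_associative_example (void) ATTRIBUTE_UNUSED;
static void
simplify_associative_example (void)
{
  rtx x = gen_rtx_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx inner = gen_rtx_PLUS (SImode, x, const1_rtx);
  rtx tem = simplify_associative_operation (PLUS, SImode, inner, GEN_INT (2));

  /* The result is (plus x 3).  */
  if (tem == 0
      || GET_CODE (tem) != PLUS
      || XEXP (tem, 0) != x
      || XEXP (tem, 1) != GEN_INT (3))
    abort ();
}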
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
          && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
        return 0;

      if (code == DIV
          && REAL_VALUES_EQUAL (f1, dconst0)
          && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */
        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;
1505 case MINUS:
1506 /* We can't assume x-x is 0 even with non-IEEE floating point,
1507 but since it is zero except in very strange circumstances, we
1508 will treat it as zero with -funsafe-math-optimizations. */
1509 if (rtx_equal_p (trueop0, trueop1)
1510 && ! side_effects_p (op0)
1511 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1512 return CONST0_RTX (mode);
1514 /* Change subtraction from zero into negation. (0 - x) is the
1515 same as -x when x is NaN, infinite, or finite and nonzero.
1516 But if the mode has signed zeros, and does not round towards
1517 -infinity, then 0 - 0 is 0, not -0. */
1518 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1519 return simplify_gen_unary (NEG, mode, op1, mode);
1521 /* (-1 - a) is ~a. */
1522 if (trueop0 == constm1_rtx)
1523 return simplify_gen_unary (NOT, mode, op1, mode);
1525 /* Subtracting 0 has no effect unless the mode has signed zeros
1526 and supports rounding towards -infinity. In such a case,
1527 0 - 0 is -0. */
1528 if (!(HONOR_SIGNED_ZEROS (mode)
1529 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1530 && trueop1 == CONST0_RTX (mode))
1531 return op0;
1533 /* See if this is something like X * C - X or vice versa or
1534 if the multiplication is written as a shift. If so, we can
1535 distribute and make a new multiply, shift, or maybe just
1536 have X (if C is 2 in the example above). But don't make
1537 real multiply if we didn't have one before. */
1539 if (! FLOAT_MODE_P (mode))
1541 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1542 rtx lhs = op0, rhs = op1;
1543 int had_mult = 0;
1545 if (GET_CODE (lhs) == NEG)
1546 coeff0 = -1, lhs = XEXP (lhs, 0);
1547 else if (GET_CODE (lhs) == MULT
1548 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1550 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1551 had_mult = 1;
1553 else if (GET_CODE (lhs) == ASHIFT
1554 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1555 && INTVAL (XEXP (lhs, 1)) >= 0
1556 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1558 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1559 lhs = XEXP (lhs, 0);
1562 if (GET_CODE (rhs) == NEG)
1563 coeff1 = - 1, rhs = XEXP (rhs, 0);
1564 else if (GET_CODE (rhs) == MULT
1565 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1567 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1568 had_mult = 1;
1570 else if (GET_CODE (rhs) == ASHIFT
1571 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1572 && INTVAL (XEXP (rhs, 1)) >= 0
1573 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1575 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1576 rhs = XEXP (rhs, 0);
1579 if (rtx_equal_p (lhs, rhs))
1581 tem = simplify_gen_binary (MULT, mode, lhs,
1582 GEN_INT (coeff0 - coeff1));
1583 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1587 /* (a - (-b)) -> (a + b). True even for IEEE. */
1588 if (GET_CODE (op1) == NEG)
1589 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1591 /* (-x - c) may be simplified as (-c - x). */
1592 if (GET_CODE (op0) == NEG
1593 && (GET_CODE (op1) == CONST_INT
1594 || GET_CODE (op1) == CONST_DOUBLE))
1596 tem = simplify_unary_operation (NEG, mode, op1, mode);
1597 if (tem)
1598 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1601 /* If one of the operands is a PLUS or a MINUS, see if we can
1602 simplify this by the associative law.
1603 Don't use the associative law for floating point.
1604 The inaccuracy makes it nonassociative,
1605 and subtle programs can break if operations are associated. */
1607 if (INTEGRAL_MODE_P (mode)
1608 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1609 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1610 || (GET_CODE (op0) == CONST
1611 && GET_CODE (XEXP (op0, 0)) == PLUS)
1612 || (GET_CODE (op1) == CONST
1613 && GET_CODE (XEXP (op1, 0)) == PLUS))
1614 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1615 return tem;
1617 /* Don't let a relocatable value get a negative coeff. */
1618 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1619 return simplify_gen_binary (PLUS, mode,
1620 op0,
1621 neg_const_int (mode, op1));
1623 /* (x - (x & y)) -> (x & ~y) */
1624 if (GET_CODE (op1) == AND)
1626 if (rtx_equal_p (op0, XEXP (op1, 0)))
1628 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1629 GET_MODE (XEXP (op1, 1)));
1630 return simplify_gen_binary (AND, mode, op0, tem);
1632 if (rtx_equal_p (op0, XEXP (op1, 1)))
1634 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1635 GET_MODE (XEXP (op1, 0)));
1636 return simplify_gen_binary (AND, mode, op0, tem);
1639 break;
1641 case MULT:
1642 if (trueop1 == constm1_rtx)
1643 return simplify_gen_unary (NEG, mode, op0, mode);
1645 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1646 x is NaN, since x * 0 is then also NaN. Nor is it valid
1647 when the mode has signed zeros, since multiplying a negative
1648 number by 0 will give -0, not 0. */
1649 if (!HONOR_NANS (mode)
1650 && !HONOR_SIGNED_ZEROS (mode)
1651 && trueop1 == CONST0_RTX (mode)
1652 && ! side_effects_p (op0))
1653 return op1;
1655 /* In IEEE floating point, x*1 is not equivalent to x for
1656 signalling NaNs. */
1657 if (!HONOR_SNANS (mode)
1658 && trueop1 == CONST1_RTX (mode))
1659 return op0;
1661 /* Convert multiply by constant power of two into shift unless
1662 we are still generating RTL. This test is a kludge. */
1663 if (GET_CODE (trueop1) == CONST_INT
1664 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1665 /* If the mode is larger than the host word size, and the
1666 uppermost bit is set, then this isn't a power of two due
1667 to implicit sign extension. */
1668 && (width <= HOST_BITS_PER_WIDE_INT
1669 || val != HOST_BITS_PER_WIDE_INT - 1)
1670 && ! rtx_equal_function_value_matters)
1671 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1673 /* x*2 is x+x and x*(-1) is -x */
1674 if (GET_CODE (trueop1) == CONST_DOUBLE
1675 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1676 && GET_MODE (op0) == mode)
1678 REAL_VALUE_TYPE d;
1679 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1681 if (REAL_VALUES_EQUAL (d, dconst2))
1682 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1684 if (REAL_VALUES_EQUAL (d, dconstm1))
1685 return simplify_gen_unary (NEG, mode, op0, mode);
1688 /* Reassociate multiplication, but for floating point MULTs
1689 only when the user specifies unsafe math optimizations. */
1690 if (! FLOAT_MODE_P (mode)
1691 || flag_unsafe_math_optimizations)
1693 tem = simplify_associative_operation (code, mode, op0, op1);
1694 if (tem)
1695 return tem;
1697 break;
1699 case IOR:
1700 if (trueop1 == const0_rtx)
1701 return op0;
1702 if (GET_CODE (trueop1) == CONST_INT
1703 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1704 == GET_MODE_MASK (mode)))
1705 return op1;
1706 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1707 return op0;
1708 /* A | (~A) -> -1 */
1709 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1710 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1711 && ! side_effects_p (op0)
1712 && GET_MODE_CLASS (mode) != MODE_CC)
1713 return constm1_rtx;
1714 tem = simplify_associative_operation (code, mode, op0, op1);
1715 if (tem)
1716 return tem;
1717 break;
1719 case XOR:
1720 if (trueop1 == const0_rtx)
1721 return op0;
1722 if (GET_CODE (trueop1) == CONST_INT
1723 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1724 == GET_MODE_MASK (mode)))
1725 return simplify_gen_unary (NOT, mode, op0, mode);
1726 if (trueop0 == trueop1 && ! side_effects_p (op0)
1727 && GET_MODE_CLASS (mode) != MODE_CC)
1728 return const0_rtx;
1729 tem = simplify_associative_operation (code, mode, op0, op1);
1730 if (tem)
1731 return tem;
1732 break;
1734 case AND:
1735 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1736 return const0_rtx;
1737 if (GET_CODE (trueop1) == CONST_INT
1738 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1739 == GET_MODE_MASK (mode)))
1740 return op0;
1741 if (trueop0 == trueop1 && ! side_effects_p (op0)
1742 && GET_MODE_CLASS (mode) != MODE_CC)
1743 return op0;
1744 /* A & (~A) -> 0 */
1745 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1746 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1747 && ! side_effects_p (op0)
1748 && GET_MODE_CLASS (mode) != MODE_CC)
1749 return const0_rtx;
1750 tem = simplify_associative_operation (code, mode, op0, op1);
1751 if (tem)
1752 return tem;
1753 break;
1755 case UDIV:
1756 /* Convert divide by power of two into shift (divide by 1 handled
1757 below). */
1758 if (GET_CODE (trueop1) == CONST_INT
1759 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1760 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1762 /* Fall through.... */
1764 case DIV:
1765 if (trueop1 == CONST1_RTX (mode))
1767 /* On some platforms DIV uses narrower mode than its
1768 operands. */
1769 rtx x = gen_lowpart_common (mode, op0);
1770 if (x)
1771 return x;
1772 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1773 return gen_lowpart_SUBREG (mode, op0);
1774 else
1775 return op0;
1778 /* Maybe change 0 / x to 0. This transformation isn't safe for
1779 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1780 Nor is it safe for modes with signed zeros, since dividing
1781 0 by a negative number gives -0, not 0. */
1782 if (!HONOR_NANS (mode)
1783 && !HONOR_SIGNED_ZEROS (mode)
1784 && trueop0 == CONST0_RTX (mode)
1785 && ! side_effects_p (op1))
1786 return op0;
1788 /* Change division by a constant into multiplication. Only do
1789 this with -funsafe-math-optimizations. */
1790 else if (GET_CODE (trueop1) == CONST_DOUBLE
1791 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1792 && trueop1 != CONST0_RTX (mode)
1793 && flag_unsafe_math_optimizations)
1795 REAL_VALUE_TYPE d;
1796 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1798 if (! REAL_VALUES_EQUAL (d, dconst0))
1800 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1801 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1802 return simplify_gen_binary (MULT, mode, op0, tem);
1805 break;
1807 case UMOD:
1808 /* Handle modulus by power of two (mod with 1 handled below). */
1809 if (GET_CODE (trueop1) == CONST_INT
1810 && exact_log2 (INTVAL (trueop1)) > 0)
1811 return simplify_gen_binary (AND, mode, op0,
1812 GEN_INT (INTVAL (op1) - 1));
1814 /* Fall through.... */
1816 case MOD:
1817 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1818 && ! side_effects_p (op0) && ! side_effects_p (op1))
1819 return const0_rtx;
1820 break;
1822 case ROTATERT:
1823 case ROTATE:
1824 case ASHIFTRT:
1825 /* Rotating ~0 always results in ~0. */
1826 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1827 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1828 && ! side_effects_p (op1))
1829 return op0;
1831 /* Fall through.... */
1833 case ASHIFT:
1834 case LSHIFTRT:
1835 if (trueop1 == const0_rtx)
1836 return op0;
1837 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1838 return op0;
1839 break;
1841 case SMIN:
1842 if (width <= HOST_BITS_PER_WIDE_INT
1843 && GET_CODE (trueop1) == CONST_INT
1844 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1845 && ! side_effects_p (op0))
1846 return op1;
1847 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1848 return op0;
1849 tem = simplify_associative_operation (code, mode, op0, op1);
1850 if (tem)
1851 return tem;
1852 break;
1854 case SMAX:
1855 if (width <= HOST_BITS_PER_WIDE_INT
1856 && GET_CODE (trueop1) == CONST_INT
1857 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1858 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1859 && ! side_effects_p (op0))
1860 return op1;
1861 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1862 return op0;
1863 tem = simplify_associative_operation (code, mode, op0, op1);
1864 if (tem)
1865 return tem;
1866 break;
1868 case UMIN:
1869 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1870 return op1;
1871 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1872 return op0;
1873 tem = simplify_associative_operation (code, mode, op0, op1);
1874 if (tem)
1875 return tem;
1876 break;
1878 case UMAX:
1879 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1880 return op1;
1881 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1882 return op0;
1883 tem = simplify_associative_operation (code, mode, op0, op1);
1884 if (tem)
1885 return tem;
1886 break;
1888 case SS_PLUS:
1889 case US_PLUS:
1890 case SS_MINUS:
1891 case US_MINUS:
1892 /* ??? There are simplifications that can be done. */
1893 return 0;
1895 case VEC_SELECT:
1896 if (!VECTOR_MODE_P (mode))
1898 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1899 || (mode
1900 != GET_MODE_INNER (GET_MODE (trueop0)))
1901 || GET_CODE (trueop1) != PARALLEL
1902 || XVECLEN (trueop1, 0) != 1
1903 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1904 abort ();
1906 if (GET_CODE (trueop0) == CONST_VECTOR)
1907 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1909 else
1911 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1912 || (GET_MODE_INNER (mode)
1913 != GET_MODE_INNER (GET_MODE (trueop0)))
1914 || GET_CODE (trueop1) != PARALLEL)
1915 abort ();
1917 if (GET_CODE (trueop0) == CONST_VECTOR)
1919 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1920 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1921 rtvec v = rtvec_alloc (n_elts);
1922 unsigned int i;
1924 if (XVECLEN (trueop1, 0) != (int) n_elts)
1925 abort ();
1926 for (i = 0; i < n_elts; i++)
1928 rtx x = XVECEXP (trueop1, 0, i);
1930 if (GET_CODE (x) != CONST_INT)
1931 abort ();
1932 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1935 return gen_rtx_CONST_VECTOR (mode, v);
1938 return 0;
1939 case VEC_CONCAT:
1941 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1942 ? GET_MODE (trueop0)
1943 : GET_MODE_INNER (mode));
1944 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1945 ? GET_MODE (trueop1)
1946 : GET_MODE_INNER (mode));
1948 if (!VECTOR_MODE_P (mode)
1949 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1950 != GET_MODE_SIZE (mode)))
1951 abort ();
1953 if ((VECTOR_MODE_P (op0_mode)
1954 && (GET_MODE_INNER (mode)
1955 != GET_MODE_INNER (op0_mode)))
1956 || (!VECTOR_MODE_P (op0_mode)
1957 && GET_MODE_INNER (mode) != op0_mode))
1958 abort ();
1960 if ((VECTOR_MODE_P (op1_mode)
1961 && (GET_MODE_INNER (mode)
1962 != GET_MODE_INNER (op1_mode)))
1963 || (!VECTOR_MODE_P (op1_mode)
1964 && GET_MODE_INNER (mode) != op1_mode))
1965 abort ();
1967 if ((GET_CODE (trueop0) == CONST_VECTOR
1968 || GET_CODE (trueop0) == CONST_INT
1969 || GET_CODE (trueop0) == CONST_DOUBLE)
1970 && (GET_CODE (trueop1) == CONST_VECTOR
1971 || GET_CODE (trueop1) == CONST_INT
1972 || GET_CODE (trueop1) == CONST_DOUBLE))
1974 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1975 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1976 rtvec v = rtvec_alloc (n_elts);
1977 unsigned int i;
1978 unsigned in_n_elts = 1;
1980 if (VECTOR_MODE_P (op0_mode))
1981 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1982 for (i = 0; i < n_elts; i++)
1984 if (i < in_n_elts)
1986 if (!VECTOR_MODE_P (op0_mode))
1987 RTVEC_ELT (v, i) = trueop0;
1988 else
1989 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1991 else
1993 if (!VECTOR_MODE_P (op1_mode))
1994 RTVEC_ELT (v, i) = trueop1;
1995 else
1996 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1997 i - in_n_elts);
2001 return gen_rtx_CONST_VECTOR (mode, v);
2004 return 0;
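/* Illustrative example (modes chosen for concreteness): concatenating
   two constant scalars yields a constant vector, e.g.
   (vec_concat:V2SI (const_int 1) (const_int 2)) folds to
   (const_vector:V2SI [(const_int 1) (const_int 2)]) by the loop
   above, with in_n_elts == 1 so element 0 comes from trueop0.  */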
2006 default:
2007 abort ();
2010 return 0;
2013 /* Get the integer argument values in two forms:
2014 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2016 arg0 = INTVAL (trueop0);
2017 arg1 = INTVAL (trueop1);
2019 if (width < HOST_BITS_PER_WIDE_INT)
2021 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2022 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2024 arg0s = arg0;
2025 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2026 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2028 arg1s = arg1;
2029 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2030 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2032 else
2034 arg0s = arg0;
2035 arg1s = arg1;
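/* Worked example of the extensions above (hypothetical 8-bit mode):
   with width == 8 and an argument whose low eight bits are all ones,
   ARG0 is masked to 255 while ARG0S has the sign bit 0x80 propagated
   upward, yielding -1; the signed cases below then see -1 and the
   unsigned cases see 255.  */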
2038 /* Compute the value of the arithmetic. */
2040 switch (code)
2042 case PLUS:
2043 val = arg0s + arg1s;
2044 break;
2046 case MINUS:
2047 val = arg0s - arg1s;
2048 break;
2050 case MULT:
2051 val = arg0s * arg1s;
2052 break;
2054 case DIV:
2055 if (arg1s == 0
2056 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2057 && arg1s == -1))
2058 return 0;
2059 val = arg0s / arg1s;
2060 break;
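/* Note on the guard above: (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
   is the most negative host-wide integer, and dividing it by -1
   overflows because its negation is not representable, so that case
   is refused just like division by zero.  */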
2062 case MOD:
2063 if (arg1s == 0
2064 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2065 && arg1s == -1))
2066 return 0;
2067 val = arg0s % arg1s;
2068 break;
2070 case UDIV:
2071 if (arg1 == 0
2072 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2073 && arg1s == -1))
2074 return 0;
2075 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2076 break;
2078 case UMOD:
2079 if (arg1 == 0
2080 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2081 && arg1s == -1))
2082 return 0;
2083 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2084 break;
2086 case AND:
2087 val = arg0 & arg1;
2088 break;
2090 case IOR:
2091 val = arg0 | arg1;
2092 break;
2094 case XOR:
2095 val = arg0 ^ arg1;
2096 break;
2098 case LSHIFTRT:
2099 /* If the shift count is undefined, don't fold it; let the machine do
2100 what it wants. But truncate it if the machine will do that. */
2101 if (arg1 < 0)
2102 return 0;
2104 #ifdef SHIFT_COUNT_TRUNCATED
2105 if (SHIFT_COUNT_TRUNCATED)
2106 arg1 %= width;
2107 #endif
2109 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2110 break;
2112 case ASHIFT:
2113 if (arg1 < 0)
2114 return 0;
2116 #ifdef SHIFT_COUNT_TRUNCATED
2117 if (SHIFT_COUNT_TRUNCATED)
2118 arg1 %= width;
2119 #endif
2121 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2122 break;
2124 case ASHIFTRT:
2125 if (arg1 < 0)
2126 return 0;
2128 #ifdef SHIFT_COUNT_TRUNCATED
2129 if (SHIFT_COUNT_TRUNCATED)
2130 arg1 %= width;
2131 #endif
2133 val = arg0s >> arg1;
2135 /* The bootstrap compiler may not have sign-extended the right shift.
2136 Manually extend the sign to ensure the bootstrap cc matches gcc. */
2137 if (arg0s < 0 && arg1 > 0)
2138 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2140 break;
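/* Worked example: with arg0s == -4 and arg1 == 1 the result must be
   -2; on a host whose signed >> shifts in zeros, the OR above forces
   the top ARG1 bits back to one, restoring the sign.  */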
2142 case ROTATERT:
2143 if (arg1 < 0)
2144 return 0;
2146 arg1 %= width;
2147 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2148 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2149 break;
2151 case ROTATE:
2152 if (arg1 < 0)
2153 return 0;
2155 arg1 %= width;
2156 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2157 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2158 break;
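/* Worked example (hypothetical 8-bit mode): rotating 0x81 left by 1
   with width == 8 computes (0x81 << 1) | (0x81 >> 7) == 0x103, and
   the final trunc_int_for_mode below masks that to 0x03, so the bit
   shifted out the top reappears at the bottom.  */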
2160 case COMPARE:
2161 /* Do nothing here. */
2162 return 0;
2164 case SMIN:
2165 val = arg0s <= arg1s ? arg0s : arg1s;
2166 break;
2168 case UMIN:
2169 val = ((unsigned HOST_WIDE_INT) arg0
2170 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2171 break;
2173 case SMAX:
2174 val = arg0s > arg1s ? arg0s : arg1s;
2175 break;
2177 case UMAX:
2178 val = ((unsigned HOST_WIDE_INT) arg0
2179 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2180 break;
2182 case SS_PLUS:
2183 case US_PLUS:
2184 case SS_MINUS:
2185 case US_MINUS:
2186 /* ??? There are simplifications that can be done. */
2187 return 0;
2189 default:
2190 abort ();
2193 val = trunc_int_for_mode (val, mode);
2195 return GEN_INT (val);
2198 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2199 PLUS or MINUS.
2201 Rather than testing for specific cases, we use a brute-force method:
2202 apply all possible simplifications until no more changes occur, then
2203 rebuild the operation.
2205 If FORCE is true, then always generate the rtx. This is used to
2206 canonicalize stuff emitted from simplify_gen_binary. Note that this
2207 can still fail if the rtx is too complex. It won't fail just because
2208 the result is not 'simpler' than the input, however. */
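/* Illustrative example of the brute-force method: given
   (minus (plus A B) (plus A C)), the expansion loop below flattens
   the input into the signed operand list {+A, +B, -A, -C}; the
   pairwise combination pass then folds A - A to zero, and the
   rebuild step emits (minus B C) (modulo operand sorting).  */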
2210 struct simplify_plus_minus_op_data
2212 rtx op;
2213 int neg;
2216 static int
2217 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2219 const struct simplify_plus_minus_op_data *d1 = p1;
2220 const struct simplify_plus_minus_op_data *d2 = p2;
2222 return (commutative_operand_precedence (d2->op)
2223 - commutative_operand_precedence (d1->op));
2226 static rtx
2227 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2228 rtx op1, int force)
2230 struct simplify_plus_minus_op_data ops[8];
2231 rtx result, tem;
2232 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2233 int first, negate, changed;
2234 int i, j;
2236 memset (ops, 0, sizeof ops);
2238 /* Set up the two operands and then expand them until nothing has been
2239 changed. If we run out of room in our array, give up; this should
2240 almost never happen. */
2242 ops[0].op = op0;
2243 ops[0].neg = 0;
2244 ops[1].op = op1;
2245 ops[1].neg = (code == MINUS);
2249 changed = 0;
2251 for (i = 0; i < n_ops; i++)
2253 rtx this_op = ops[i].op;
2254 int this_neg = ops[i].neg;
2255 enum rtx_code this_code = GET_CODE (this_op);
2257 switch (this_code)
2259 case PLUS:
2260 case MINUS:
2261 if (n_ops == 7)
2262 return NULL_RTX;
2264 ops[n_ops].op = XEXP (this_op, 1);
2265 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2266 n_ops++;
2268 ops[i].op = XEXP (this_op, 0);
2269 input_ops++;
2270 changed = 1;
2271 break;
2273 case NEG:
2274 ops[i].op = XEXP (this_op, 0);
2275 ops[i].neg = ! this_neg;
2276 changed = 1;
2277 break;
2279 case CONST:
2280 if (n_ops < 7
2281 && GET_CODE (XEXP (this_op, 0)) == PLUS
2282 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2283 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2285 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2286 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2287 ops[n_ops].neg = this_neg;
2288 n_ops++;
2289 input_consts++;
2290 changed = 1;
2292 break;
2294 case NOT:
2295 /* ~a -> (-a - 1) */
2296 if (n_ops != 7)
2298 ops[n_ops].op = constm1_rtx;
2299 ops[n_ops++].neg = this_neg;
2300 ops[i].op = XEXP (this_op, 0);
2301 ops[i].neg = !this_neg;
2302 changed = 1;
2304 break;
2306 case CONST_INT:
2307 if (this_neg)
2309 ops[i].op = neg_const_int (mode, this_op);
2310 ops[i].neg = 0;
2311 changed = 1;
2313 break;
2315 default:
2316 break;
2320 while (changed);
2322 /* If we only have two operands, we can't do anything. */
2323 if (n_ops <= 2 && !force)
2324 return NULL_RTX;
2326 /* Count the number of CONSTs we didn't split above. */
2327 for (i = 0; i < n_ops; i++)
2328 if (GET_CODE (ops[i].op) == CONST)
2329 input_consts++;
2331 /* Now simplify each pair of operands until nothing changes. The first
2332 time through just simplify constants against each other. */
2334 first = 1;
2337 changed = first;
2339 for (i = 0; i < n_ops - 1; i++)
2340 for (j = i + 1; j < n_ops; j++)
2342 rtx lhs = ops[i].op, rhs = ops[j].op;
2343 int lneg = ops[i].neg, rneg = ops[j].neg;
2345 if (lhs != 0 && rhs != 0
2346 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2348 enum rtx_code ncode = PLUS;
2350 if (lneg != rneg)
2352 ncode = MINUS;
2353 if (lneg)
2354 tem = lhs, lhs = rhs, rhs = tem;
2356 else if (swap_commutative_operands_p (lhs, rhs))
2357 tem = lhs, lhs = rhs, rhs = tem;
2359 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2361 /* Reject "simplifications" that just wrap the two
2362 arguments in a CONST. Failure to do so can result
2363 in infinite recursion with simplify_binary_operation
2364 when it calls us to simplify CONST operations. */
2365 if (tem
2366 && ! (GET_CODE (tem) == CONST
2367 && GET_CODE (XEXP (tem, 0)) == ncode
2368 && XEXP (XEXP (tem, 0), 0) == lhs
2369 && XEXP (XEXP (tem, 0), 1) == rhs)
2370 /* Don't allow -x + -1 -> ~x simplifications in the
2371 first pass. This allows us the chance to combine
2372 the -1 with other constants. */
2373 && ! (first
2374 && GET_CODE (tem) == NOT
2375 && XEXP (tem, 0) == rhs))
2377 lneg &= rneg;
2378 if (GET_CODE (tem) == NEG)
2379 tem = XEXP (tem, 0), lneg = !lneg;
2380 if (GET_CODE (tem) == CONST_INT && lneg)
2381 tem = neg_const_int (mode, tem), lneg = 0;
2383 ops[i].op = tem;
2384 ops[i].neg = lneg;
2385 ops[j].op = NULL_RTX;
2386 changed = 1;
2391 first = 0;
2393 while (changed);
2395 /* Pack all the operands to the lower-numbered entries. */
2396 for (i = 0, j = 0; j < n_ops; j++)
2397 if (ops[j].op)
2398 ops[i++] = ops[j];
2399 n_ops = i;
2401 /* Sort the operations based on swap_commutative_operands_p. */
2402 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2404 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2405 if (n_ops == 2
2406 && GET_CODE (ops[1].op) == CONST_INT
2407 && CONSTANT_P (ops[0].op)
2408 && ops[0].neg)
2409 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2411 /* We suppressed creation of trivial CONST expressions in the
2412 combination loop to avoid recursion. Create one manually now.
2413 The combination loop should have ensured that there is exactly
2414 one CONST_INT, and the sort will have ensured that it is last
2415 in the array and that any other constant will be next-to-last. */
2417 if (n_ops > 1
2418 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2419 && CONSTANT_P (ops[n_ops - 2].op))
2421 rtx value = ops[n_ops - 1].op;
2422 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2423 value = neg_const_int (mode, value);
2424 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2425 n_ops--;
2428 /* Count the number of CONSTs that we generated. */
2429 n_consts = 0;
2430 for (i = 0; i < n_ops; i++)
2431 if (GET_CODE (ops[i].op) == CONST)
2432 n_consts++;
2434 /* Give up if we didn't reduce the number of operands we had. Make
2435 sure we count a CONST as two operands. If we have the same
2436 number of operands, but have made more CONSTs than before, this
2437 is also an improvement, so accept it. */
2438 if (!force
2439 && (n_ops + n_consts > input_ops
2440 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2441 return NULL_RTX;
2443 /* Put a non-negated operand first. If there aren't any, make all
2444 operands positive and negate the whole thing later. */
2446 negate = 0;
2447 for (i = 0; i < n_ops && ops[i].neg; i++)
2448 continue;
2449 if (i == n_ops)
2451 for (i = 0; i < n_ops; i++)
2452 ops[i].neg = 0;
2453 negate = 1;
2455 else if (i != 0)
2457 tem = ops[0].op;
2458 ops[0] = ops[i];
2459 ops[i].op = tem;
2460 ops[i].neg = 1;
2463 /* Now make the result by performing the requested operations. */
2464 result = ops[0].op;
2465 for (i = 1; i < n_ops; i++)
2466 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2467 mode, result, ops[i].op);
2469 return negate ? gen_rtx_NEG (mode, result) : result;
2472 /* Like simplify_binary_operation except used for relational operators.
2473 MODE is the mode of the operands, not that of the result. If MODE
2474 is VOIDmode, both operands must also be VOIDmode and we compare the
2475 operands in "infinite precision".
2477 If no simplification is possible, this function returns zero. Otherwise,
2478 it returns either const_true_rtx or const0_rtx. */
2481 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2482 rtx op0, rtx op1)
2484 int equal, op0lt, op0ltu, op1lt, op1ltu;
2485 rtx tem;
2486 rtx trueop0;
2487 rtx trueop1;
2489 if (mode == VOIDmode
2490 && (GET_MODE (op0) != VOIDmode
2491 || GET_MODE (op1) != VOIDmode))
2492 abort ();
2494 /* If op0 is a compare, extract the comparison arguments from it. */
2495 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2496 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2498 trueop0 = avoid_constant_pool_reference (op0);
2499 trueop1 = avoid_constant_pool_reference (op1);
2501 /* We can't simplify MODE_CC values since we don't know what the
2502 actual comparison is. */
2503 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2504 return 0;
2506 /* Make sure the constant is second. */
2507 if (swap_commutative_operands_p (trueop0, trueop1))
2509 tem = op0, op0 = op1, op1 = tem;
2510 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2511 code = swap_condition (code);
2514 /* For integer comparisons of A and B, maybe we can simplify A - B and
2515 then simplify a comparison of that with zero. If A and B are both either
2516 a register or a CONST_INT, this can't help; testing for these cases will
2517 prevent infinite recursion here and speed things up.
2519 If CODE is an unsigned comparison, then we can never do this optimization,
2520 because it gives an incorrect result if the subtraction wraps around zero.
2521 ANSI C defines unsigned operations such that they never overflow, and
2522 thus such cases cannot be ignored. */
2524 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2525 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2526 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2527 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2528 && code != GTU && code != GEU && code != LTU && code != LEU)
2529 return simplify_relational_operation (signed_condition (code),
2530 mode, tem, const0_rtx);
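/* Illustrative example: for (gt (plus X (const_int 4)) (const_int 7))
   the subtraction folds to (plus X (const_int -3)), so we recurse on
   a comparison of that against (const_int 0).  */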
2532 if (flag_unsafe_math_optimizations && code == ORDERED)
2533 return const_true_rtx;
2535 if (flag_unsafe_math_optimizations && code == UNORDERED)
2536 return const0_rtx;
2538 /* For modes without NaNs, if the two operands are equal, we know the
2539 result except if they have side-effects. */
2540 if (! HONOR_NANS (GET_MODE (trueop0))
2541 && rtx_equal_p (trueop0, trueop1)
2542 && ! side_effects_p (trueop0))
2543 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2545 /* If the operands are floating-point constants, see if we can fold
2546 the result. */
2547 else if (GET_CODE (trueop0) == CONST_DOUBLE
2548 && GET_CODE (trueop1) == CONST_DOUBLE
2549 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2551 REAL_VALUE_TYPE d0, d1;
2553 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2554 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2556 /* Comparisons are unordered iff at least one of the values is NaN. */
2557 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2558 switch (code)
2560 case UNEQ:
2561 case UNLT:
2562 case UNGT:
2563 case UNLE:
2564 case UNGE:
2565 case NE:
2566 case UNORDERED:
2567 return const_true_rtx;
2568 case EQ:
2569 case LT:
2570 case GT:
2571 case LE:
2572 case GE:
2573 case LTGT:
2574 case ORDERED:
2575 return const0_rtx;
2576 default:
2577 return 0;
2580 equal = REAL_VALUES_EQUAL (d0, d1);
2581 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2582 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2585 /* Otherwise, see if the operands are both integers. */
2586 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2587 && (GET_CODE (trueop0) == CONST_DOUBLE
2588 || GET_CODE (trueop0) == CONST_INT)
2589 && (GET_CODE (trueop1) == CONST_DOUBLE
2590 || GET_CODE (trueop1) == CONST_INT))
2592 int width = GET_MODE_BITSIZE (mode);
2593 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2594 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2596 /* Get the two words comprising each integer constant. */
2597 if (GET_CODE (trueop0) == CONST_DOUBLE)
2599 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2600 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2602 else
2604 l0u = l0s = INTVAL (trueop0);
2605 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2608 if (GET_CODE (trueop1) == CONST_DOUBLE)
2610 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2611 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2613 else
2615 l1u = l1s = INTVAL (trueop1);
2616 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2619 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2620 we have to sign or zero-extend the values. */
2621 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2623 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2624 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2626 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2627 l0s |= ((HOST_WIDE_INT) (-1) << width);
2629 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2630 l1s |= ((HOST_WIDE_INT) (-1) << width);
2632 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2633 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2635 equal = (h0u == h1u && l0u == l1u);
2636 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2637 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2638 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2639 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
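/* Worked example of the two-word flags: comparing 5 with -1, the
   high words are 0 and all-ones respectively, so op0lt is 0
   (signed: the high word 0 is not less than -1) while op0ltu is 1
   (unsigned: 0 is less than ~0), matching 5 > -1 as signed values
   and 5 < the all-ones value as unsigned ones.  */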
2642 /* Otherwise, there are some code-specific tests we can make. */
2643 else
2645 switch (code)
2647 case EQ:
2648 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2649 return const0_rtx;
2650 break;
2652 case NE:
2653 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2654 return const_true_rtx;
2655 break;
2657 case GEU:
2658 /* Unsigned values are never negative. */
2659 if (trueop1 == const0_rtx)
2660 return const_true_rtx;
2661 break;
2663 case LTU:
2664 if (trueop1 == const0_rtx)
2665 return const0_rtx;
2666 break;
2668 case LEU:
2669 /* Unsigned values are never greater than the largest
2670 unsigned value. */
2671 if (GET_CODE (trueop1) == CONST_INT
2672 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2673 && INTEGRAL_MODE_P (mode))
2674 return const_true_rtx;
2675 break;
2677 case GTU:
2678 if (GET_CODE (trueop1) == CONST_INT
2679 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2680 && INTEGRAL_MODE_P (mode))
2681 return const0_rtx;
2682 break;
2684 case LT:
2685 /* Optimize abs(x) < 0.0. */
2686 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2688 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2689 : trueop0;
2690 if (GET_CODE (tem) == ABS)
2691 return const0_rtx;
2693 break;
2695 case GE:
2696 /* Optimize abs(x) >= 0.0. */
2697 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2699 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2700 : trueop0;
2701 if (GET_CODE (tem) == ABS)
2702 return const_true_rtx;
2704 break;
2706 case UNGE:
2707 /* Optimize ! (abs(x) < 0.0). */
2708 if (trueop1 == CONST0_RTX (mode))
2710 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2711 : trueop0;
2712 if (GET_CODE (tem) == ABS)
2713 return const_true_rtx;
2715 break;
2717 default:
2718 break;
2721 return 0;
2724 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2725 as appropriate. */
2726 switch (code)
2728 case EQ:
2729 case UNEQ:
2730 return equal ? const_true_rtx : const0_rtx;
2731 case NE:
2732 case LTGT:
2733 return ! equal ? const_true_rtx : const0_rtx;
2734 case LT:
2735 case UNLT:
2736 return op0lt ? const_true_rtx : const0_rtx;
2737 case GT:
2738 case UNGT:
2739 return op1lt ? const_true_rtx : const0_rtx;
2740 case LTU:
2741 return op0ltu ? const_true_rtx : const0_rtx;
2742 case GTU:
2743 return op1ltu ? const_true_rtx : const0_rtx;
2744 case LE:
2745 case UNLE:
2746 return equal || op0lt ? const_true_rtx : const0_rtx;
2747 case GE:
2748 case UNGE:
2749 return equal || op1lt ? const_true_rtx : const0_rtx;
2750 case LEU:
2751 return equal || op0ltu ? const_true_rtx : const0_rtx;
2752 case GEU:
2753 return equal || op1ltu ? const_true_rtx : const0_rtx;
2754 case ORDERED:
2755 return const_true_rtx;
2756 case UNORDERED:
2757 return const0_rtx;
2758 default:
2759 abort ();
2763 /* Simplify CODE, an operation with result mode MODE and three operands,
2764 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2765 a constant. Return 0 if no simplification is possible. */
2768 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2769 enum machine_mode op0_mode, rtx op0, rtx op1,
2770 rtx op2)
2772 unsigned int width = GET_MODE_BITSIZE (mode);
2774 /* VOIDmode means "infinite" precision. */
2775 if (width == 0)
2776 width = HOST_BITS_PER_WIDE_INT;
2778 switch (code)
2780 case SIGN_EXTRACT:
2781 case ZERO_EXTRACT:
2782 if (GET_CODE (op0) == CONST_INT
2783 && GET_CODE (op1) == CONST_INT
2784 && GET_CODE (op2) == CONST_INT
2785 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2786 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2788 /* Extracting a bit-field from a constant. */
2789 HOST_WIDE_INT val = INTVAL (op0);
2791 if (BITS_BIG_ENDIAN)
2792 val >>= (GET_MODE_BITSIZE (op0_mode)
2793 - INTVAL (op2) - INTVAL (op1));
2794 else
2795 val >>= INTVAL (op2);
2797 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2799 /* First zero-extend. */
2800 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2801 /* If desired, propagate sign bit. */
2802 if (code == SIGN_EXTRACT
2803 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2804 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2807 /* Clear the bits that don't belong in our mode,
2808 unless they and our sign bit are all one.
2809 So we get either a reasonable negative value or a reasonable
2810 unsigned value for this mode. */
2811 if (width < HOST_BITS_PER_WIDE_INT
2812 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2813 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2814 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2816 return GEN_INT (val);
2818 break;
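/* Illustrative example (little-endian bit numbering assumed): with
   op0 == (const_int 0xf5), length op1 == 4 and position op2 == 4,
   the code shifts right by 4 and masks with 0xf; ZERO_EXTRACT folds
   to (const_int 15), while SIGN_EXTRACT propagates bit 3 of the
   field and folds to (const_int -1).  */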
2820 case IF_THEN_ELSE:
2821 if (GET_CODE (op0) == CONST_INT)
2822 return op0 != const0_rtx ? op1 : op2;
2824 /* Convert c ? a : a into "a". */
2825 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2826 return op1;
2828 /* Convert a != b ? a : b into "a". */
2829 if (GET_CODE (op0) == NE
2830 && ! side_effects_p (op0)
2831 && ! HONOR_NANS (mode)
2832 && ! HONOR_SIGNED_ZEROS (mode)
2833 && ((rtx_equal_p (XEXP (op0, 0), op1)
2834 && rtx_equal_p (XEXP (op0, 1), op2))
2835 || (rtx_equal_p (XEXP (op0, 0), op2)
2836 && rtx_equal_p (XEXP (op0, 1), op1))))
2837 return op1;
2839 /* Convert a == b ? a : b into "b". */
2840 if (GET_CODE (op0) == EQ
2841 && ! side_effects_p (op0)
2842 && ! HONOR_NANS (mode)
2843 && ! HONOR_SIGNED_ZEROS (mode)
2844 && ((rtx_equal_p (XEXP (op0, 0), op1)
2845 && rtx_equal_p (XEXP (op0, 1), op2))
2846 || (rtx_equal_p (XEXP (op0, 0), op2)
2847 && rtx_equal_p (XEXP (op0, 1), op1))))
2848 return op2;
2850 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2852 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2853 ? GET_MODE (XEXP (op0, 1))
2854 : GET_MODE (XEXP (op0, 0)));
2855 rtx temp;
2856 if (cmp_mode == VOIDmode)
2857 cmp_mode = op0_mode;
2858 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2859 XEXP (op0, 0), XEXP (op0, 1));
2861 /* See if any simplifications were possible. */
2862 if (temp == const0_rtx)
2863 return op2;
2864 else if (temp == const_true_rtx)
2865 return op1;
2866 else if (temp)
2867 abort ();
2869 /* Look for happy constants in op1 and op2. */
2870 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2872 HOST_WIDE_INT t = INTVAL (op1);
2873 HOST_WIDE_INT f = INTVAL (op2);
2875 if (t == STORE_FLAG_VALUE && f == 0)
2876 code = GET_CODE (op0);
2877 else if (t == 0 && f == STORE_FLAG_VALUE)
2879 enum rtx_code tmp;
2880 tmp = reversed_comparison_code (op0, NULL_RTX);
2881 if (tmp == UNKNOWN)
2882 break;
2883 code = tmp;
2885 else
2886 break;
2888 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2891 break;
2893 case VEC_MERGE:
2894 if (GET_MODE (op0) != mode
2895 || GET_MODE (op1) != mode
2896 || !VECTOR_MODE_P (mode))
2897 abort ();
2898 op2 = avoid_constant_pool_reference (op2);
2899 if (GET_CODE (op2) == CONST_INT)
2901 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2902 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2903 int mask = (1 << n_elts) - 1;
2905 if (!(INTVAL (op2) & mask))
2906 return op1;
2907 if ((INTVAL (op2) & mask) == mask)
2908 return op0;
2910 op0 = avoid_constant_pool_reference (op0);
2911 op1 = avoid_constant_pool_reference (op1);
2912 if (GET_CODE (op0) == CONST_VECTOR
2913 && GET_CODE (op1) == CONST_VECTOR)
2915 rtvec v = rtvec_alloc (n_elts);
2916 unsigned int i;
2918 for (i = 0; i < n_elts; i++)
2919 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2920 ? CONST_VECTOR_ELT (op0, i)
2921 : CONST_VECTOR_ELT (op1, i));
2922 return gen_rtx_CONST_VECTOR (mode, v);
2925 break;
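/* Illustrative example (modes for concreteness): merging two V2SI
   constant vectors [1 2] and [3 4] under op2 == (const_int 1), the
   set bit 0 selects element 0 from op0 and the clear bit 1 selects
   element 1 from op1, folding to (const_vector:V2SI [1 4]).  */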
2927 default:
2928 abort ();
2931 return 0;
2934 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2935 Return 0 if no simplification is possible. */
2937 simplify_subreg (enum machine_mode outermode, rtx op,
2938 enum machine_mode innermode, unsigned int byte)
2940 /* Little bit of sanity checking. */
2941 if (innermode == VOIDmode || outermode == VOIDmode
2942 || innermode == BLKmode || outermode == BLKmode)
2943 abort ();
2945 if (GET_MODE (op) != innermode
2946 && GET_MODE (op) != VOIDmode)
2947 abort ();
2949 if (byte % GET_MODE_SIZE (outermode)
2950 || byte >= GET_MODE_SIZE (innermode))
2951 abort ();
2953 if (outermode == innermode && !byte)
2954 return op;
2956 /* Simplify subregs of vector constants. */
2957 if (GET_CODE (op) == CONST_VECTOR)
2959 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2960 const unsigned int offset = byte / elt_size;
2961 rtx elt;
2963 if (GET_MODE_INNER (innermode) == outermode)
2965 elt = CONST_VECTOR_ELT (op, offset);
2967 /* ?? We probably don't need this copy_rtx because constants
2968 can be shared. ?? */
2970 return copy_rtx (elt);
2972 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2973 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2975 return (gen_rtx_CONST_VECTOR
2976 (outermode,
2977 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2978 &CONST_VECTOR_ELT (op, offset))));
2980 else if (GET_MODE_CLASS (outermode) == MODE_INT
2981 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2983 /* This happens when the target register size is smaller than
2984 the vector mode, and we synthesize operations with vectors
2985 of elements that are smaller than the register size. */
2986 HOST_WIDE_INT sum = 0, high = 0;
2987 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2988 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2989 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2990 int shift = BITS_PER_UNIT * elt_size;
2991 unsigned HOST_WIDE_INT unit_mask;
2993 unit_mask = (unsigned HOST_WIDE_INT) -1
2994 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2996 for (; n_elts--; i += step)
2998 elt = CONST_VECTOR_ELT (op, i);
2999 if (GET_CODE (elt) == CONST_DOUBLE
3000 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
3002 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
3003 elt);
3004 if (! elt)
3005 return NULL_RTX;
3007 if (GET_CODE (elt) != CONST_INT)
3008 return NULL_RTX;
3009 /* Avoid overflow. */
3010 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
3011 return NULL_RTX;
3012 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
3013 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
3015 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
3016 return GEN_INT (trunc_int_for_mode (sum, outermode));
3017 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
3018 return immed_double_const (sum, high, outermode);
3019 else
3020 return NULL_RTX;
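/* Worked example of the accumulation above (hypothetical
   little-endian target): an SImode subreg at byte 0 of
   (const_vector:V4QI [1 2 3 4]) starts at element 3 and steps down,
   computing ((4 << 8 | 3) << 8 | 2) << 8 | 1 == 0x04030201 before
   the final truncation.  */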
3022 else if (GET_MODE_CLASS (outermode) == MODE_INT
3023 && (elt_size % GET_MODE_SIZE (outermode) == 0))
3025 enum machine_mode new_mode
3026 = int_mode_for_mode (GET_MODE_INNER (innermode));
3027 int subbyte = byte % elt_size;
3029 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
3030 if (! op)
3031 return NULL_RTX;
3032 return simplify_subreg (outermode, op, new_mode, subbyte);
3034 else if (GET_MODE_CLASS (outermode) == MODE_INT)
3035 /* This shouldn't happen, but let's not do anything stupid. */
3036 return NULL_RTX;
3039 /* Attempt to simplify a constant to a non-SUBREG expression. */
3040 if (CONSTANT_P (op))
3042 int offset, part;
3043 unsigned HOST_WIDE_INT val = 0;
3045 if (VECTOR_MODE_P (outermode))
3047 /* Construct a CONST_VECTOR from individual subregs. */
3048 enum machine_mode submode = GET_MODE_INNER (outermode);
3049 int subsize = GET_MODE_UNIT_SIZE (outermode);
3050 int i, elts = GET_MODE_NUNITS (outermode);
3051 rtvec v = rtvec_alloc (elts);
3052 rtx elt;
3054 for (i = 0; i < elts; i++, byte += subsize)
3056 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
3057 /* ??? It would be nice if we could actually make such subregs
3058 on targets that allow such relocations. */
3059 if (byte >= GET_MODE_SIZE (innermode))
3060 elt = CONST0_RTX (submode);
3061 else
3062 elt = simplify_subreg (submode, op, innermode, byte);
3063 if (! elt)
3064 return NULL_RTX;
3065 RTVEC_ELT (v, i) = elt;
3067 return gen_rtx_CONST_VECTOR (outermode, v);
3070 /* ??? This code is partly redundant with code below, but can handle
3071 the subregs of floats and similar corner cases.
3072 Later we should move all simplification code here and rewrite
3073 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
3074 using SIMPLIFY_SUBREG. */
3075 if (subreg_lowpart_offset (outermode, innermode) == byte
3076 && GET_CODE (op) != CONST_VECTOR)
3078 rtx new = gen_lowpart_if_possible (outermode, op);
3079 if (new)
3080 return new;
3083 /* A similar comment to the one above applies here. */
3084 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
3085 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
3086 && GET_MODE_CLASS (outermode) == MODE_INT)
3088 rtx new = constant_subword (op,
3089 (byte / UNITS_PER_WORD),
3090 innermode);
3091 if (new)
3092 return new;
3095 if (GET_MODE_CLASS (outermode) != MODE_INT
3096 && GET_MODE_CLASS (outermode) != MODE_CC)
3098 enum machine_mode new_mode = int_mode_for_mode (outermode);
3100 if (new_mode != innermode || byte != 0)
3102 op = simplify_subreg (new_mode, op, innermode, byte);
3103 if (! op)
3104 return NULL_RTX;
3105 return simplify_subreg (outermode, op, new_mode, 0);
3109 offset = byte * BITS_PER_UNIT;
3110 switch (GET_CODE (op))
3112 case CONST_DOUBLE:
3113 if (GET_MODE (op) != VOIDmode)
3114 break;
3116 /* We can't handle this case yet. */
3117 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
3118 return NULL_RTX;
3120 part = offset >= HOST_BITS_PER_WIDE_INT;
3121 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
3122 && BYTES_BIG_ENDIAN)
3123 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
3124 && WORDS_BIG_ENDIAN))
3125 part = !part;
3126 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
3127 offset %= HOST_BITS_PER_WIDE_INT;
3129 /* We've already picked the word we want from a double, so
3130 pretend this is actually an integer. */
3131 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
3133 /* FALLTHROUGH */
3134 case CONST_INT:
3135 if (GET_CODE (op) == CONST_INT)
3136 val = INTVAL (op);
3138 /* We don't handle synthesizing non-integral constants yet. */
3139 if (GET_MODE_CLASS (outermode) != MODE_INT)
3140 return NULL_RTX;
3142 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
3144 if (WORDS_BIG_ENDIAN)
3145 offset = (GET_MODE_BITSIZE (innermode)
3146 - GET_MODE_BITSIZE (outermode) - offset);
3147 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
3148 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
3149 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
3150 - 2 * (offset % BITS_PER_WORD));
3153 if (offset >= HOST_BITS_PER_WIDE_INT)
3154 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
3155 else
3157 val >>= offset;
3158 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
3159 val = trunc_int_for_mode (val, outermode);
3160 return GEN_INT (val);
3162 default:
3163 break;
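/* Illustrative example of the CONST_INT case (little-endian byte
   order assumed): an HImode subreg at byte 2 of SImode
   (const_int 0x12345678) gives offset == 16 bits, so val >>= 16
   leaves 0x1234, which trunc_int_for_mode then adjusts for
   HImode.  */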
3167 /* Changing mode twice with SUBREG => just change it once,
3168 or not at all if changing back to op's starting mode. */
3169 if (GET_CODE (op) == SUBREG)
3171 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3172 int final_offset = byte + SUBREG_BYTE (op);
3173 rtx new;
3175 if (outermode == innermostmode
3176 && byte == 0 && SUBREG_BYTE (op) == 0)
3177 return SUBREG_REG (op);
3179 /* The SUBREG_BYTE represents the offset, as if the value were stored
3180 in memory. The irritating exception is a paradoxical subreg, where
3181 we define SUBREG_BYTE to be 0. On big-endian machines, this
3182 value should be negative. For a moment, undo this exception. */
3183 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3185 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3186 if (WORDS_BIG_ENDIAN)
3187 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3188 if (BYTES_BIG_ENDIAN)
3189 final_offset += difference % UNITS_PER_WORD;
3191 if (SUBREG_BYTE (op) == 0
3192 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3194 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3195 if (WORDS_BIG_ENDIAN)
3196 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3197 if (BYTES_BIG_ENDIAN)
3198 final_offset += difference % UNITS_PER_WORD;
3202 /* See whether the resulting subreg will be paradoxical. */
3202 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3204 /* In nonparadoxical subregs we can't handle negative offsets. */
3205 if (final_offset < 0)
3206 return NULL_RTX;
3207 /* Bail out in case resulting subreg would be incorrect. */
3208 if (final_offset % GET_MODE_SIZE (outermode)
3209 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3210 return NULL_RTX;
3212 else
3214 int offset = 0;
3215 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3217 /* In a paradoxical subreg, see if we are still looking at the lower part.
3218 If so, our SUBREG_BYTE will be 0. */
3219 if (WORDS_BIG_ENDIAN)
3220 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3221 if (BYTES_BIG_ENDIAN)
3222 offset += difference % UNITS_PER_WORD;
3223 if (offset == final_offset)
3224 final_offset = 0;
3225 else
3226 return NULL_RTX;
3229 /* Recurse for further possible simplifications. */
3230 new = simplify_subreg (outermode, SUBREG_REG (op),
3231 GET_MODE (SUBREG_REG (op)),
3232 final_offset);
3233 if (new)
3234 return new;
3235 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
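/* Illustrative example: (subreg:QI (subreg:HI (reg:SI R) 0) 0)
   collapses to a single (subreg:QI (reg:SI R) 0) here, the two
   byte offsets simply adding on a little-endian target.  */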
3238 /* SUBREG of a hard register => just change the register number
3239 and/or mode. If the hard register is not valid in that mode,
3240 suppress this simplification. If the hard register is the stack,
3241 frame, or argument pointer, leave this as a SUBREG. */
3243 if (REG_P (op)
3244 && (! REG_FUNCTION_VALUE_P (op)
3245 || ! rtx_equal_function_value_matters)
3246 && REGNO (op) < FIRST_PSEUDO_REGISTER
3247 #ifdef CANNOT_CHANGE_MODE_CLASS
3248 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3249 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3250 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3251 #endif
3252 && ((reload_completed && !frame_pointer_needed)
3253 || (REGNO (op) != FRAME_POINTER_REGNUM
3254 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3255 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3256 #endif
3258 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3259 && REGNO (op) != ARG_POINTER_REGNUM
3260 #endif
3261 && REGNO (op) != STACK_POINTER_REGNUM
3262 && subreg_offset_representable_p (REGNO (op), innermode,
3263 byte, outermode))
3265 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3266 int final_regno = subreg_hard_regno (tem, 0);
3268 /* ??? We do allow it if the current REG is not valid for
3269 its mode. This is a kludge to work around how float/complex
3270 arguments are passed on 32-bit SPARC and should be fixed. */
3271 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3272 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3274 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3276 /* Propagate the original regno. We don't have any way to specify
3277 the offset inside the original regno, so do so only for the lowpart.
3278 The information is used only by alias analysis, which cannot
3279 grok partial registers anyway. */
3281 if (subreg_lowpart_offset (outermode, innermode) == byte)
3282 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3283 return x;
3287 /* If we have a SUBREG of a register that we are replacing and we are
3288 replacing it with a MEM, make a new MEM and try replacing the
3289 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3290 or if we would be widening it. */
3292 if (GET_CODE (op) == MEM
3293 && ! mode_dependent_address_p (XEXP (op, 0))
3294 /* Allow splitting of volatile memory references in case we don't
3295 have an instruction to move the whole thing. */
3296 && (! MEM_VOLATILE_P (op)
3297 || ! have_insn_for (SET, innermode))
3298 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3299 return adjust_address_nv (op, outermode, byte);
3301 /* Handle complex values represented as CONCAT
3302 of real and imaginary part. */
3303 if (GET_CODE (op) == CONCAT)
3305 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3306 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3307 unsigned int final_offset;
3308 rtx res;
3310 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3311 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3312 if (res)
3313 return res;
3314 /* We can at least simplify it by referring directly to the relevant part. */
3315 return gen_rtx_SUBREG (outermode, part, final_offset);
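/* Illustrative example of the CONCAT case: for a complex value
   (concat:SC X Y), a subreg at byte 0 resolves to the real part X,
   while one at byte GET_MODE_UNIT_SIZE refers to the imaginary
   part Y.  */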
3318 return NULL_RTX;
3320 /* Make a SUBREG operation or equivalent if it folds. */
3323 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3324 enum machine_mode innermode, unsigned int byte)
3326 rtx new;
3327 /* Little bit of sanity checking. */
3328 if (innermode == VOIDmode || outermode == VOIDmode
3329 || innermode == BLKmode || outermode == BLKmode)
3330 abort ();
3332 if (GET_MODE (op) != innermode
3333 && GET_MODE (op) != VOIDmode)
3334 abort ();
3336 if (byte % GET_MODE_SIZE (outermode)
3337 || byte >= GET_MODE_SIZE (innermode))
3338 abort ();
3340 if (GET_CODE (op) == QUEUED)
3341 return NULL_RTX;
3343 new = simplify_subreg (outermode, op, innermode, byte);
3344 if (new)
3345 return new;
3347 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3348 return NULL_RTX;
3350 return gen_rtx_SUBREG (outermode, op, byte);
3352 /* Simplify X, an rtx expression.
3354 Return the simplified expression or NULL if no simplifications
3355 were possible.
3357 This is the preferred entry point into the simplification routines;
3358 however, we still allow passes to call the more specific routines.
3360 Right now GCC has three (yes, three) major bodies of RTL simplification
3361 code that need to be unified.
3363 1. fold_rtx in cse.c. This code uses various CSE specific
3364 information to aid in RTL simplification.
3366 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3367 it uses combine specific information to aid in RTL
3368 simplification.
3370 3. The routines in this file.
3373 Long term we want to only have one body of simplification code; to
3374 get to that state I recommend the following steps:
3376 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3377 which do not depend on pass-specific state into these routines.
3379 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3380 use this routine whenever possible.
3382 3. Allow for pass dependent state to be provided to these
3383 routines and add simplifications based on the pass dependent
3384 state. Remove code from cse.c & combine.c that becomes
3385 redundant/dead.
3387 It will take time, but ultimately the compiler will be easier to
3388 maintain and improve. It's totally silly that when we add a
3389 simplification it needs to be added to 4 places (3 for RTL
3390 simplification and 1 for tree simplification). */
3393 simplify_rtx (rtx x)
3395 enum rtx_code code = GET_CODE (x);
3396 enum machine_mode mode = GET_MODE (x);
3397 rtx temp;
3399 switch (GET_RTX_CLASS (code))
3401 case '1':
3402 return simplify_unary_operation (code, mode,
3403 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3404 case 'c':
3405 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3406 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3408 /* Fall through.... */
3410 case '2':
3411 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3413 case '3':
3414 case 'b':
3415 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3416 XEXP (x, 0), XEXP (x, 1),
3417 XEXP (x, 2));
3419 case '<':
3420 temp = simplify_relational_operation (code,
3421 ((GET_MODE (XEXP (x, 0))
3422 != VOIDmode)
3423 ? GET_MODE (XEXP (x, 0))
3424 : GET_MODE (XEXP (x, 1))),
3425 XEXP (x, 0), XEXP (x, 1));
3426 #ifdef FLOAT_STORE_FLAG_VALUE
3427 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3429 if (temp == const0_rtx)
3430 temp = CONST0_RTX (mode);
3431 else
3432 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3433 mode);
3435 #endif
3436 return temp;
3438 case 'x':
3439 if (code == SUBREG)
3440 return simplify_gen_subreg (mode, SUBREG_REG (x),
3441 GET_MODE (SUBREG_REG (x)),
3442 SUBREG_BYTE (x));
3443 if (code == CONSTANT_P_RTX)
3445 if (CONSTANT_P (XEXP (x, 0)))
3446 return const1_rtx;
3448 break;
3450 case 'o':
3451 if (code == LO_SUM)
3453 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3454 if (GET_CODE (XEXP (x, 0)) == HIGH
3455 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3456 return XEXP (x, 1);
3458 break;
3460 default:
3461 break;
3463 return NULL;