Forgot to mention the PR number:
[official-gcc.git] / gcc / simplify-rtx.c
blob 7f6b549b0fbd2da710ba7d7249af5d46874707d6
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
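/* Illustrative note (added commentary, not in the original file): a
   value V is carried as the pair (V, HWI_SIGN_EXTEND (V)).  For
   example, with a 64-bit HOST_WIDE_INT:
       HWI_SIGN_EXTEND (1)  == 0    so the pair (1, 0)   represents  1
       HWI_SIGN_EXTEND (-1) == -1   so the pair (-1, -1) represents -1  */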
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
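/* Illustrative example (added commentary): the truncation matters for
   the most negative value.  In SImode, negating (const_int -2147483648)
   yields 0x80000000, which gen_int_mode truncates back into SImode,
   producing (const_int -2147483648) again rather than an out-of-range
   constant.  */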
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
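/* Usage sketch (added commentary, hypothetical operands): because
   commutative operands are reordered before folding, a call such as
       simplify_gen_binary (PLUS, SImode, GEN_INT (3), reg)
   yields (plus (reg) (const_int 3)), the canonical form with the
   constant second.  */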
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
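/* Illustrative example (added commentary, hypothetical names): given
       x = (mem (symbol_ref LC0))
   where LC0 is a constant-pool entry holding (const_double ... 2.5),
   this returns the CONST_DOUBLE itself, letting callers fold through
   loads from the pool.  */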
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
	{
#ifdef FLOAT_STORE_FLAG_VALUE
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE val;
	      if (tem == const0_rtx)
		return CONST0_RTX (mode);
	      if (tem != const_true_rtx)
		abort ();
	      val = FLOAT_STORE_FLAG_VALUE (mode);
	      return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	    }
#endif
	  return tem;
	}
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return op0;
	  return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
	  if (new != UNKNOWN)
	    return simplify_gen_relational (new, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
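/* Illustrative example (added commentary): for integer comparisons,
       (eq (lt (reg a) (reg b)) (const_int 0))
   reaches the EQ arm above and folds to (ge (reg a) (reg b)), since
   GE is the reversed code of LT.  */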
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case 'o':
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}
      break;

    default:
      break;
    }
  return x;
}
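/* Usage sketch (added commentary, hypothetical values): substituting a
   known constant for a register and letting the result fold:
       x   = (plus:SI (reg 60) (const_int 4))
       old = (reg 60),  new = (const_int 3)
   simplify_replace_rtx returns (const_int 7), because the recursive
   rebuild goes through simplify_gen_binary.  */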
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;
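	  /* Worked example (added commentary): for arg0 == 12 (binary 1100),
	     arg0 & -arg0 == 4 and exact_log2 (4) == 2, so val == 3, matching
	     ffs (12) == 3.  For arg0 == 0, exact_log2 returns -1 and val is
	     0, as the comment above requires.  */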
	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
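  /* Illustrative example (added commentary): sign-extending the QImode
     constant 0xff to SImode takes the SIGN_EXTEND arm above: val is
     masked to 0xff, its sign bit (0x80) is set, so 0x100 is subtracted
     and the result is (const_int -1).  */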
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;

	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  abort ();
	}
      return immed_double_const (xl, xh, mode);
    }
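  /* Illustrative example (added commentary): under these saturating
     semantics, (fix:SI (const_double:DF 1e10)) folds to
     (const_int 2147483647), the signed upper bound, and a NaN operand
     folds to (const_int 0).  */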
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);

	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
						   mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}
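/* Usage sketch (added commentary, hypothetical operands):
       simplify_unary_operation (NEG, SImode, GEN_INT (5), SImode)
   returns (const_int -5) via the CONST_INT arm above, while a
   non-constant operand only simplifies when one of the structural
   patterns above (e.g. (neg (neg X)) -> X) applies.  */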
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
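/* Illustrative example (added commentary): linearizing to the left lets
   nested constants meet and fold, e.g.
       (plus (plus (reg x) (const_int 1)) (const_int 2))
   becomes (plus (reg x) (const_int 3)), because the two constants
   combine via simplify_binary_operation.  */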
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	return 0;

      if (code == DIV
	  && REAL_VALUES_EQUAL (f1, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:  case ASHIFTRT:
	case ASHIFT:
	case ROTATE:  case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif
	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a  */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
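	  /* Illustrative example (added commentary): with op0 == (mult x 3)
	     and op1 == x, coeff0 == 3, coeff1 == 1 and had_mult == 1, so the
	     sum folds to (mult x (const_int 4)).  A pure shift form such as
	     (ashift x 2) plus x would create a new MULT, which the
	     had_mult == 0 check above rejects.  */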
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* (-x - c) may be simplified as (-c - x).  */
	  if (GET_CODE (op0) == NEG
	      && (GET_CODE (op1) == CONST_INT
		  || GET_CODE (op1) == CONST_DOUBLE))
	    {
	      tem = simplify_unary_operation (NEG, mode, op1, mode);
	      if (tem)
		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y)  */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
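	  /* Illustrative example (added commentary): once RTL generation is
	     finished, (mult:SI (reg x) (const_int 8)) becomes
	     (ashift:SI (reg x) (const_int 3)), since exact_log2 (8) == 3.  */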
	  /* x*2 is x+x and x*(-1) is -x  */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

	  /* Fall through....  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	  break;
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));
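	  /* Illustrative example (added commentary): for a power-of-two
	     modulus, (umod:SI (reg x) (const_int 8)) becomes
	     (and:SI (reg x) (const_int 7)).  */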
	  /* Fall through....  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;
	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (mode
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL
		  || XVECLEN (trueop1, 0) != 1
		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
	    }
	  else
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (GET_MODE_INNER (mode)
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  if (XVECLEN (trueop1, 0) != (int) n_elts)
		    abort ();
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (GET_CODE (x) != CONST_INT)
			abort ();
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;
	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    if (!VECTOR_MODE_P (mode)
		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    != GET_MODE_SIZE (mode)))
	      abort ();

	    if ((VECTOR_MODE_P (op0_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op0_mode)))
		|| (!VECTOR_MODE_P (op0_mode)
		    && GET_MODE_INNER (mode) != op0_mode))
	      abort ();

	    if ((VECTOR_MODE_P (op1_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op1_mode)))
		|| (!VECTOR_MODE_P (op1_mode)
		    && GET_MODE_INNER (mode) != op1_mode))
	      abort ();

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }
	  }
	  return 0;

	default:
	  abort ();
	}

      return 0;
    }
2025 /* Get the integer argument values in two forms:
2026 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
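/* For example (illustrative): with width == 8 and
   INTVAL (trueop0) == -1, ARG0 becomes 0xff after the masking below,
   while ARG0S is sign-extended back to -1, so the unsigned and
   signed folds each see the form they expect.  */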
2028 arg0 = INTVAL (trueop0);
2029 arg1 = INTVAL (trueop1);
2031 if (width < HOST_BITS_PER_WIDE_INT)
2033 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2034 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2036 arg0s = arg0;
2037 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2038 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2040 arg1s = arg1;
2041 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2042 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2044 else
2046 arg0s = arg0;
2047 arg1s = arg1;
2050 /* Compute the value of the arithmetic. */
2052 switch (code)
2054 case PLUS:
2055 val = arg0s + arg1s;
2056 break;
2058 case MINUS:
2059 val = arg0s - arg1s;
2060 break;
2062 case MULT:
2063 val = arg0s * arg1s;
2064 break;
2066 case DIV:
2067 if (arg1s == 0
2068 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2069 && arg1s == -1))
2070 return 0;
2071 val = arg0s / arg1s;
2072 break;
2074 case MOD:
2075 if (arg1s == 0
2076 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2077 && arg1s == -1))
2078 return 0;
2079 val = arg0s % arg1s;
2080 break;
2082 case UDIV:
2083 if (arg1 == 0
2084 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2085 && arg1s == -1))
2086 return 0;
2087 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2088 break;
2090 case UMOD:
2091 if (arg1 == 0
2092 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2093 && arg1s == -1))
2094 return 0;
2095 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2096 break;
2098 case AND:
2099 val = arg0 & arg1;
2100 break;
2102 case IOR:
2103 val = arg0 | arg1;
2104 break;
2106 case XOR:
2107 val = arg0 ^ arg1;
2108 break;
2110 case LSHIFTRT:
2111 /* If shift count is undefined, don't fold it; let the machine do
2112 what it wants. But truncate it if the machine will do that. */
2113 if (arg1 < 0)
2114 return 0;
2116 #ifdef SHIFT_COUNT_TRUNCATED
2117 if (SHIFT_COUNT_TRUNCATED)
2118 arg1 %= width;
2119 #endif
2121 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2122 break;
2124 case ASHIFT:
2125 if (arg1 < 0)
2126 return 0;
2128 #ifdef SHIFT_COUNT_TRUNCATED
2129 if (SHIFT_COUNT_TRUNCATED)
2130 arg1 %= width;
2131 #endif
2133 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2134 break;
2136 case ASHIFTRT:
2137 if (arg1 < 0)
2138 return 0;
2140 #ifdef SHIFT_COUNT_TRUNCATED
2141 if (SHIFT_COUNT_TRUNCATED)
2142 arg1 %= width;
2143 #endif
2145 val = arg0s >> arg1;
2147 /* The bootstrap compiler may not have sign-extended the right shift.
2148 Manually extend the sign to ensure the bootstrap cc matches gcc. */
2149 if (arg0s < 0 && arg1 > 0)
2150 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2152 break;
2154 case ROTATERT:
2155 if (arg1 < 0)
2156 return 0;
2158 arg1 %= width;
2159 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2160 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2161 break;
2163 case ROTATE:
2164 if (arg1 < 0)
2165 return 0;
2167 arg1 %= width;
2168 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2169 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2170 break;
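/* Worked example (illustrative): rotating the 8-bit value 0x81
   left by 1 computes (0x81 << 1) | (0x81 >> 7) == 0x103, which
   trunc_int_for_mode below reduces to 0x03.  */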
2172 case COMPARE:
2173 /* Do nothing here. */
2174 return 0;
2176 case SMIN:
2177 val = arg0s <= arg1s ? arg0s : arg1s;
2178 break;
2180 case UMIN:
2181 val = ((unsigned HOST_WIDE_INT) arg0
2182 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2183 break;
2185 case SMAX:
2186 val = arg0s > arg1s ? arg0s : arg1s;
2187 break;
2189 case UMAX:
2190 val = ((unsigned HOST_WIDE_INT) arg0
2191 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2192 break;
2194 case SS_PLUS:
2195 case US_PLUS:
2196 case SS_MINUS:
2197 case US_MINUS:
2198 /* ??? There are simplifications that can be done. */
2199 return 0;
2201 default:
2202 abort ();
2205 val = trunc_int_for_mode (val, mode);
2207 return GEN_INT (val);
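/* A minimal standalone sketch (not part of the original file) of the
   narrow-width folding scheme above, with plain "long" standing in
   for HOST_WIDE_INT and PLUS standing in for the operator; it assumes
   0 < WIDTH < the number of bits in "long".  Operands are truncated
   to WIDTH bits, the host arithmetic is done, and the result is
   truncated and sign-extended as trunc_int_for_mode would do.  */

static long
fold_plus_in_width (long arg0, long arg1, int width)
{
  long mask = ((long) 1 << width) - 1;
  long val;

  arg0 &= mask;                 /* zero-extend the inputs */
  arg1 &= mask;
  val = (arg0 + arg1) & mask;   /* fold, then truncate to WIDTH bits */
  if (val & ((long) 1 << (width - 1)))
    val |= ~mask;               /* sign-extend the truncated result */
  return val;
}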
2210 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2211 PLUS or MINUS.
2213 Rather than testing for specific cases, we use a brute-force method:
2214 we apply all possible simplifications until no more changes occur. Then
2215 we rebuild the operation.
2217 If FORCE is true, then always generate the rtx. This is used to
2218 canonicalize stuff emitted from simplify_gen_binary. Note that this
2219 can still fail if the rtx is too complex. It won't fail just because
2220 the result is not 'simpler' than the input, however. */
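/* Illustrative expansion (not from the original source): for
   (minus (plus a b) (neg c)) the expansion loop below yields the
   entries {a, +}, {c, +}, {b, +}; after sorting, the operation is
   rebuilt as a sum of the three terms.  */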
2222 struct simplify_plus_minus_op_data
2224 rtx op;
2225 int neg;
2228 static int
2229 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2231 const struct simplify_plus_minus_op_data *d1 = p1;
2232 const struct simplify_plus_minus_op_data *d2 = p2;
2234 return (commutative_operand_precedence (d2->op)
2235 - commutative_operand_precedence (d1->op));
2238 static rtx
2239 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2240 rtx op1, int force)
2242 struct simplify_plus_minus_op_data ops[8];
2243 rtx result, tem;
2244 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2245 int first, changed;
2246 int i, j;
2248 memset (ops, 0, sizeof ops);
2250 /* Set up the two operands and then expand them until nothing has been
2251 changed. If we run out of room in our array, give up; this should
2252 almost never happen. */
2254 ops[0].op = op0;
2255 ops[0].neg = 0;
2256 ops[1].op = op1;
2257 ops[1].neg = (code == MINUS);
2261 changed = 0;
2263 for (i = 0; i < n_ops; i++)
2265 rtx this_op = ops[i].op;
2266 int this_neg = ops[i].neg;
2267 enum rtx_code this_code = GET_CODE (this_op);
2269 switch (this_code)
2271 case PLUS:
2272 case MINUS:
2273 if (n_ops == 7)
2274 return NULL_RTX;
2276 ops[n_ops].op = XEXP (this_op, 1);
2277 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2278 n_ops++;
2280 ops[i].op = XEXP (this_op, 0);
2281 input_ops++;
2282 changed = 1;
2283 break;
2285 case NEG:
2286 ops[i].op = XEXP (this_op, 0);
2287 ops[i].neg = ! this_neg;
2288 changed = 1;
2289 break;
2291 case CONST:
2292 if (n_ops < 7
2293 && GET_CODE (XEXP (this_op, 0)) == PLUS
2294 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2295 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2297 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2298 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2299 ops[n_ops].neg = this_neg;
2300 n_ops++;
2301 input_consts++;
2302 changed = 1;
2304 break;
2306 case NOT:
2307 /* ~a -> (-a - 1) */
2308 if (n_ops != 7)
2310 ops[n_ops].op = constm1_rtx;
2311 ops[n_ops++].neg = this_neg;
2312 ops[i].op = XEXP (this_op, 0);
2313 ops[i].neg = !this_neg;
2314 changed = 1;
2316 break;
2318 case CONST_INT:
2319 if (this_neg)
2321 ops[i].op = neg_const_int (mode, this_op);
2322 ops[i].neg = 0;
2323 changed = 1;
2325 break;
2327 default:
2328 break;
2332 while (changed);
2334 /* If we only have two operands, we can't do anything. */
2335 if (n_ops <= 2 && !force)
2336 return NULL_RTX;
2338 /* Count the number of CONSTs we didn't split above. */
2339 for (i = 0; i < n_ops; i++)
2340 if (GET_CODE (ops[i].op) == CONST)
2341 input_consts++;
2343 /* Now simplify each pair of operands until nothing changes. The first
2344 time through just simplify constants against each other. */
2346 first = 1;
2349 changed = first;
2351 for (i = 0; i < n_ops - 1; i++)
2352 for (j = i + 1; j < n_ops; j++)
2354 rtx lhs = ops[i].op, rhs = ops[j].op;
2355 int lneg = ops[i].neg, rneg = ops[j].neg;
2357 if (lhs != 0 && rhs != 0
2358 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2360 enum rtx_code ncode = PLUS;
2362 if (lneg != rneg)
2364 ncode = MINUS;
2365 if (lneg)
2366 tem = lhs, lhs = rhs, rhs = tem;
2368 else if (swap_commutative_operands_p (lhs, rhs))
2369 tem = lhs, lhs = rhs, rhs = tem;
2371 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2373 /* Reject "simplifications" that just wrap the two
2374 arguments in a CONST. Failure to do so can result
2375 in infinite recursion with simplify_binary_operation
2376 when it calls us to simplify CONST operations. */
2377 if (tem
2378 && ! (GET_CODE (tem) == CONST
2379 && GET_CODE (XEXP (tem, 0)) == ncode
2380 && XEXP (XEXP (tem, 0), 0) == lhs
2381 && XEXP (XEXP (tem, 0), 1) == rhs)
2382 /* Don't allow -x + -1 -> ~x simplifications in the
2383 first pass. This allows us the chance to combine
2384 the -1 with other constants. */
2385 && ! (first
2386 && GET_CODE (tem) == NOT
2387 && XEXP (tem, 0) == rhs))
2389 lneg &= rneg;
2390 if (GET_CODE (tem) == NEG)
2391 tem = XEXP (tem, 0), lneg = !lneg;
2392 if (GET_CODE (tem) == CONST_INT && lneg)
2393 tem = neg_const_int (mode, tem), lneg = 0;
2395 ops[i].op = tem;
2396 ops[i].neg = lneg;
2397 ops[j].op = NULL_RTX;
2398 changed = 1;
2403 first = 0;
2405 while (changed);
2407 /* Pack all the operands to the lower-numbered entries. */
2408 for (i = 0, j = 0; j < n_ops; j++)
2409 if (ops[j].op)
2410 ops[i++] = ops[j];
2411 n_ops = i;
2413 /* Sort the operations based on swap_commutative_operands_p. */
2414 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2416 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2417 if (n_ops == 2
2418 && GET_CODE (ops[1].op) == CONST_INT
2419 && CONSTANT_P (ops[0].op)
2420 && ops[0].neg)
2421 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2423 /* We suppressed creation of trivial CONST expressions in the
2424 combination loop to avoid recursion. Create one manually now.
2425 The combination loop should have ensured that there is exactly
2426 one CONST_INT, and the sort will have ensured that it is last
2427 in the array and that any other constant will be next-to-last. */
2429 if (n_ops > 1
2430 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2431 && CONSTANT_P (ops[n_ops - 2].op))
2433 rtx value = ops[n_ops - 1].op;
2434 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2435 value = neg_const_int (mode, value);
2436 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2437 n_ops--;
2440 /* Count the number of CONSTs that we generated. */
2441 n_consts = 0;
2442 for (i = 0; i < n_ops; i++)
2443 if (GET_CODE (ops[i].op) == CONST)
2444 n_consts++;
2446 /* Give up if we didn't reduce the number of operands we had. Make
2447 sure we count a CONST as two operands. If we have the same
2448 number of operands, but have made more CONSTs than before, this
2449 is also an improvement, so accept it. */
2450 if (!force
2451 && (n_ops + n_consts > input_ops
2452 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2453 return NULL_RTX;
2455 /* Put a non-negated operand first, if possible. */
2457 for (i = 0; i < n_ops && ops[i].neg; i++)
2458 continue;
2459 if (i == n_ops)
2460 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2461 else if (i != 0)
2463 tem = ops[0].op;
2464 ops[0] = ops[i];
2465 ops[i].op = tem;
2466 ops[i].neg = 1;
2469 /* Now make the result by performing the requested operations. */
2470 result = ops[0].op;
2471 for (i = 1; i < n_ops; i++)
2472 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2473 mode, result, ops[i].op);
2475 return result;
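/* Standalone sketch (illustrative only) of the compaction idiom used
   in simplify_plus_minus: entries consumed by a pairwise
   simplification are set to null, and the survivors are then packed
   into the lower-numbered slots.  */

static int
pack_nonnull (void **ops, int n_ops)
{
  int i, j;

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j])
      ops[i++] = ops[j];
  return i;                     /* the new operand count */
}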
2478 /* Like simplify_binary_operation except used for relational operators.
2479 MODE is the mode of the operands, not that of the result. If MODE
2480 is VOIDmode, both operands must also be VOIDmode and we compare the
2481 operands in "infinite precision".
2483 If no simplification is possible, this function returns zero. Otherwise,
2484 it returns either const_true_rtx or const0_rtx. */
2487 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2488 rtx op0, rtx op1)
2490 int equal, op0lt, op0ltu, op1lt, op1ltu;
2491 rtx tem;
2492 rtx trueop0;
2493 rtx trueop1;
2495 if (mode == VOIDmode
2496 && (GET_MODE (op0) != VOIDmode
2497 || GET_MODE (op1) != VOIDmode))
2498 abort ();
2500 /* If op0 is a compare, extract the comparison arguments from it. */
2501 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2502 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2504 /* We can't simplify MODE_CC values since we don't know what the
2505 actual comparison is. */
2506 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2507 return 0;
2509 /* Make sure the constant is second. */
2510 if (swap_commutative_operands_p (op0, op1))
2512 tem = op0, op0 = op1, op1 = tem;
2513 code = swap_condition (code);
2516 trueop0 = avoid_constant_pool_reference (op0);
2517 trueop1 = avoid_constant_pool_reference (op1);
2519 /* For integer comparisons of A and B, we may be able to simplify A - B
2520 and then simplify a comparison of that with zero. If A and B are both
2521 either a register or a CONST_INT, this can't help; testing for these
2522 cases will prevent infinite recursion here and speed things up.
2524 If CODE is an unsigned comparison, then we can never do this optimization,
2525 because it gives an incorrect result if the subtraction wraps around zero.
2526 ANSI C defines unsigned operations such that they never overflow, and
2527 thus such cases cannot be ignored. */
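/* E.g. (illustrative): for (gt (plus x 3) x) the MINUS may fold to
   (const_int 3), and the recursive call then reduces the whole
   comparison to const_true_rtx.  */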
2529 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2530 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2531 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2532 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2533 /* We cannot do this for == or != if tem is a nonzero address. */
2534 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2535 && code != GTU && code != GEU && code != LTU && code != LEU)
2536 return simplify_relational_operation (signed_condition (code),
2537 mode, tem, const0_rtx);
2539 if (flag_unsafe_math_optimizations && code == ORDERED)
2540 return const_true_rtx;
2542 if (flag_unsafe_math_optimizations && code == UNORDERED)
2543 return const0_rtx;
2545 /* For modes without NaNs, if the two operands are equal, we know the
2546 result except if they have side-effects. */
2547 if (! HONOR_NANS (GET_MODE (trueop0))
2548 && rtx_equal_p (trueop0, trueop1)
2549 && ! side_effects_p (trueop0))
2550 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2552 /* If the operands are floating-point constants, see if we can fold
2553 the result. */
2554 else if (GET_CODE (trueop0) == CONST_DOUBLE
2555 && GET_CODE (trueop1) == CONST_DOUBLE
2556 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2558 REAL_VALUE_TYPE d0, d1;
2560 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2561 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2563 /* Comparisons are unordered iff at least one of the values is NaN. */
2564 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2565 switch (code)
2567 case UNEQ:
2568 case UNLT:
2569 case UNGT:
2570 case UNLE:
2571 case UNGE:
2572 case NE:
2573 case UNORDERED:
2574 return const_true_rtx;
2575 case EQ:
2576 case LT:
2577 case GT:
2578 case LE:
2579 case GE:
2580 case LTGT:
2581 case ORDERED:
2582 return const0_rtx;
2583 default:
2584 return 0;
2587 equal = REAL_VALUES_EQUAL (d0, d1);
2588 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2589 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2592 /* Otherwise, see if the operands are both integers. */
2593 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2594 && (GET_CODE (trueop0) == CONST_DOUBLE
2595 || GET_CODE (trueop0) == CONST_INT)
2596 && (GET_CODE (trueop1) == CONST_DOUBLE
2597 || GET_CODE (trueop1) == CONST_INT))
2599 int width = GET_MODE_BITSIZE (mode);
2600 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2601 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2603 /* Get the two words comprising each integer constant. */
2604 if (GET_CODE (trueop0) == CONST_DOUBLE)
2606 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2607 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2609 else
2611 l0u = l0s = INTVAL (trueop0);
2612 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2615 if (GET_CODE (trueop1) == CONST_DOUBLE)
2617 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2618 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2620 else
2622 l1u = l1s = INTVAL (trueop1);
2623 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2626 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2627 we have to sign or zero-extend the values. */
2628 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2630 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2631 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2633 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2634 l0s |= ((HOST_WIDE_INT) (-1) << width);
2636 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2637 l1s |= ((HOST_WIDE_INT) (-1) << width);
2639 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2640 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2642 equal = (h0u == h1u && l0u == l1u);
2643 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2644 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2645 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2646 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2649 /* Otherwise, there are some code-specific tests we can make. */
2650 else
2652 switch (code)
2654 case EQ:
2655 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2656 return const0_rtx;
2657 break;
2659 case NE:
2660 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2661 return const_true_rtx;
2662 break;
2664 case GEU:
2665 /* Unsigned values are never negative. */
2666 if (trueop1 == const0_rtx)
2667 return const_true_rtx;
2668 break;
2670 case LTU:
2671 if (trueop1 == const0_rtx)
2672 return const0_rtx;
2673 break;
2675 case LEU:
2676 /* Unsigned values are never greater than the largest
2677 unsigned value. */
2678 if (GET_CODE (trueop1) == CONST_INT
2679 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2680 && INTEGRAL_MODE_P (mode))
2681 return const_true_rtx;
2682 break;
2684 case GTU:
2685 if (GET_CODE (trueop1) == CONST_INT
2686 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2687 && INTEGRAL_MODE_P (mode))
2688 return const0_rtx;
2689 break;
2691 case LT:
2692 /* Optimize abs(x) < 0.0. */
2693 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2695 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2696 : trueop0;
2697 if (GET_CODE (tem) == ABS)
2698 return const0_rtx;
2700 break;
2702 case GE:
2703 /* Optimize abs(x) >= 0.0. */
2704 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2706 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2707 : trueop0;
2708 if (GET_CODE (tem) == ABS)
2709 return const_true_rtx;
2711 break;
2713 case UNGE:
2714 /* Optimize ! (abs(x) < 0.0). */
2715 if (trueop1 == CONST0_RTX (mode))
2717 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2718 : trueop0;
2719 if (GET_CODE (tem) == ABS)
2720 return const_true_rtx;
2722 break;
2724 default:
2725 break;
2728 return 0;
2731 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2732 as appropriate. */
2733 switch (code)
2735 case EQ:
2736 case UNEQ:
2737 return equal ? const_true_rtx : const0_rtx;
2738 case NE:
2739 case LTGT:
2740 return ! equal ? const_true_rtx : const0_rtx;
2741 case LT:
2742 case UNLT:
2743 return op0lt ? const_true_rtx : const0_rtx;
2744 case GT:
2745 case UNGT:
2746 return op1lt ? const_true_rtx : const0_rtx;
2747 case LTU:
2748 return op0ltu ? const_true_rtx : const0_rtx;
2749 case GTU:
2750 return op1ltu ? const_true_rtx : const0_rtx;
2751 case LE:
2752 case UNLE:
2753 return equal || op0lt ? const_true_rtx : const0_rtx;
2754 case GE:
2755 case UNGE:
2756 return equal || op1lt ? const_true_rtx : const0_rtx;
2757 case LEU:
2758 return equal || op0ltu ? const_true_rtx : const0_rtx;
2759 case GEU:
2760 return equal || op1ltu ? const_true_rtx : const0_rtx;
2761 case ORDERED:
2762 return const_true_rtx;
2763 case UNORDERED:
2764 return const0_rtx;
2765 default:
2766 abort ();
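/* A minimal standalone sketch (not part of the original file) of the
   double-word comparison idiom above, with "long" standing in for
   HOST_WIDE_INT: one two-word value is less than another when its
   high word is smaller, or the high words tie and the unsigned low
   word is smaller.  */

static int
dword_lt (long h0, unsigned long l0, long h1, unsigned long l1)
{
  return h0 < h1 || (h0 == h1 && l0 < l1);   /* signed compare */
}

static int
dword_ltu (unsigned long h0, unsigned long l0,
           unsigned long h1, unsigned long l1)
{
  return h0 < h1 || (h0 == h1 && l0 < l1);   /* unsigned compare */
}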
2770 /* Simplify CODE, an operation with result mode MODE and three operands,
2771 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2772 a constant. Return 0 if no simplification is possible. */
2775 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2776 enum machine_mode op0_mode, rtx op0, rtx op1,
2777 rtx op2)
2779 unsigned int width = GET_MODE_BITSIZE (mode);
2781 /* VOIDmode means "infinite" precision. */
2782 if (width == 0)
2783 width = HOST_BITS_PER_WIDE_INT;
2785 switch (code)
2787 case SIGN_EXTRACT:
2788 case ZERO_EXTRACT:
2789 if (GET_CODE (op0) == CONST_INT
2790 && GET_CODE (op1) == CONST_INT
2791 && GET_CODE (op2) == CONST_INT
2792 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2793 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2795 /* Extracting a bit-field from a constant. */
2796 HOST_WIDE_INT val = INTVAL (op0);
2798 if (BITS_BIG_ENDIAN)
2799 val >>= (GET_MODE_BITSIZE (op0_mode)
2800 - INTVAL (op2) - INTVAL (op1));
2801 else
2802 val >>= INTVAL (op2);
2804 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2806 /* First zero-extend. */
2807 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2808 /* If desired, propagate sign bit. */
2809 if (code == SIGN_EXTRACT
2810 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2811 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
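/* E.g. (illustrative): extracting a 4-bit field at bit 0 from
   (const_int 0x0c) leaves val == 0xc; for SIGN_EXTRACT bit 3 is
   set, so the OR above widens it to -4.  */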
2814 /* Clear the bits that don't belong in our mode,
2815 unless they and our sign bit are all one.
2816 So we get either a reasonable negative value or a reasonable
2817 unsigned value for this mode. */
2818 if (width < HOST_BITS_PER_WIDE_INT
2819 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2820 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2821 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2823 return GEN_INT (val);
2825 break;
2827 case IF_THEN_ELSE:
2828 if (GET_CODE (op0) == CONST_INT)
2829 return op0 != const0_rtx ? op1 : op2;
2831 /* Convert c ? a : a into "a". */
2832 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2833 return op1;
2835 /* Convert a != b ? a : b into "a". */
2836 if (GET_CODE (op0) == NE
2837 && ! side_effects_p (op0)
2838 && ! HONOR_NANS (mode)
2839 && ! HONOR_SIGNED_ZEROS (mode)
2840 && ((rtx_equal_p (XEXP (op0, 0), op1)
2841 && rtx_equal_p (XEXP (op0, 1), op2))
2842 || (rtx_equal_p (XEXP (op0, 0), op2)
2843 && rtx_equal_p (XEXP (op0, 1), op1))))
2844 return op1;
2846 /* Convert a == b ? a : b into "b". */
2847 if (GET_CODE (op0) == EQ
2848 && ! side_effects_p (op0)
2849 && ! HONOR_NANS (mode)
2850 && ! HONOR_SIGNED_ZEROS (mode)
2851 && ((rtx_equal_p (XEXP (op0, 0), op1)
2852 && rtx_equal_p (XEXP (op0, 1), op2))
2853 || (rtx_equal_p (XEXP (op0, 0), op2)
2854 && rtx_equal_p (XEXP (op0, 1), op1))))
2855 return op2;
2857 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2859 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2860 ? GET_MODE (XEXP (op0, 1))
2861 : GET_MODE (XEXP (op0, 0)));
2862 rtx temp;
2863 if (cmp_mode == VOIDmode)
2864 cmp_mode = op0_mode;
2865 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2866 XEXP (op0, 0), XEXP (op0, 1));
2868 /* See if any simplifications were possible. */
2869 if (temp == const0_rtx)
2870 return op2;
2871 else if (temp == const_true_rtx)
2872 return op1;
2873 else if (temp)
2874 abort ();
2876 /* Look for constants in op1 and op2 that pair STORE_FLAG_VALUE with zero, so the (possibly reversed) comparison itself can be used. */
2877 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2879 HOST_WIDE_INT t = INTVAL (op1);
2880 HOST_WIDE_INT f = INTVAL (op2);
2882 if (t == STORE_FLAG_VALUE && f == 0)
2883 code = GET_CODE (op0);
2884 else if (t == 0 && f == STORE_FLAG_VALUE)
2886 enum rtx_code tmp;
2887 tmp = reversed_comparison_code (op0, NULL_RTX);
2888 if (tmp == UNKNOWN)
2889 break;
2890 code = tmp;
2892 else
2893 break;
2895 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2898 break;
2900 case VEC_MERGE:
2901 if (GET_MODE (op0) != mode
2902 || GET_MODE (op1) != mode
2903 || !VECTOR_MODE_P (mode))
2904 abort ();
2905 op2 = avoid_constant_pool_reference (op2);
2906 if (GET_CODE (op2) == CONST_INT)
2908 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2909 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2910 int mask = (1 << n_elts) - 1;
2912 if (!(INTVAL (op2) & mask))
2913 return op1;
2914 if ((INTVAL (op2) & mask) == mask)
2915 return op0;
2917 op0 = avoid_constant_pool_reference (op0);
2918 op1 = avoid_constant_pool_reference (op1);
2919 if (GET_CODE (op0) == CONST_VECTOR
2920 && GET_CODE (op1) == CONST_VECTOR)
2922 rtvec v = rtvec_alloc (n_elts);
2923 unsigned int i;
2925 for (i = 0; i < n_elts; i++)
2926 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2927 ? CONST_VECTOR_ELT (op0, i)
2928 : CONST_VECTOR_ELT (op1, i));
2929 return gen_rtx_CONST_VECTOR (mode, v);
2932 break;
2934 default:
2935 abort ();
2938 return 0;
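/* Standalone sketch (illustrative only) of the VEC_MERGE constant
   fold above: bit I of the mask selects element I from A, otherwise
   element I comes from B.  */

static void
merge_by_mask (const int *a, const int *b, int *dst, int n, int mask)
{
  int i;

  for (i = 0; i < n; i++)
    dst[i] = (mask & (1 << i)) ? a[i] : b[i];
}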
2941 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
2942 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
2944 Works by unpacking OP into a collection of 8-bit values
2945 represented as a little-endian array of 'unsigned char', selecting by BYTE,
2946 and then repacking them again for OUTERMODE. */
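/* E.g. (illustrative): on a little-endian target,
   (subreg:QI (const_int 0x1234) 0) of an HImode value unpacks to
   the byte array {0x34, 0x12}, selects byte 0, and repacks to
   (const_int 0x34).  */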
2948 static rtx
2949 simplify_immed_subreg (enum machine_mode outermode, rtx op,
2950 enum machine_mode innermode, unsigned int byte)
2952 /* We support up to 512-bit values (for V8DFmode). */
2953 enum {
2954 max_bitsize = 512,
2955 value_bit = 8,
2956 value_mask = (1 << value_bit) - 1
2958 unsigned char value[max_bitsize / value_bit];
2959 int value_start;
2960 int i;
2961 int elem;
2963 int num_elem;
2964 rtx * elems;
2965 int elem_bitsize;
2966 rtx result_s;
2967 rtvec result_v = NULL;
2968 enum mode_class outer_class;
2969 enum machine_mode outer_submode;
2971 /* Some ports misuse CCmode. */
2972 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
2973 return op;
2975 /* Unpack the value. */
2977 if (GET_CODE (op) == CONST_VECTOR)
2979 num_elem = CONST_VECTOR_NUNITS (op);
2980 elems = &CONST_VECTOR_ELT (op, 0);
2981 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
2983 else
2985 num_elem = 1;
2986 elems = &op;
2987 elem_bitsize = max_bitsize;
2990 if (BITS_PER_UNIT % value_bit != 0)
2991 abort (); /* Too complicated; reducing value_bit may help. */
2992 if (elem_bitsize % BITS_PER_UNIT != 0)
2993 abort (); /* I don't know how to handle endianness of sub-units. */
2995 for (elem = 0; elem < num_elem; elem++)
2997 unsigned char * vp;
2998 rtx el = elems[elem];
3000 /* Vectors are kept in target memory order. (This is probably
3001 a mistake.) */
3003 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3004 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3005 / BITS_PER_UNIT);
3006 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3007 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3008 unsigned bytele = (subword_byte % UNITS_PER_WORD
3009 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3010 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3013 switch (GET_CODE (el))
3015 case CONST_INT:
3016 for (i = 0;
3017 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3018 i += value_bit)
3019 *vp++ = INTVAL (el) >> i;
3020 /* CONST_INTs are always logically sign-extended. */
3021 for (; i < elem_bitsize; i += value_bit)
3022 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3023 break;
3025 case CONST_DOUBLE:
3026 if (GET_MODE (el) == VOIDmode)
3028 /* If this triggers, someone should have generated a
3029 CONST_INT instead. */
3030 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3031 abort ();
3033 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3034 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3035 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3037 *vp++
3038 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3039 i += value_bit;
3041 /* It shouldn't matter what's done here, so fill it with
3042 zero. */
3043 for (; i < max_bitsize; i += value_bit)
3044 *vp++ = 0;
3046 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3048 long tmp[max_bitsize / 32];
3049 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3051 if (bitsize > elem_bitsize)
3052 abort ();
3053 if (bitsize % value_bit != 0)
3054 abort ();
3056 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3057 GET_MODE (el));
3059 /* real_to_target produces its result in words affected by
3060 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3061 and use WORDS_BIG_ENDIAN instead; see the documentation
3062 of SUBREG in rtl.texi. */
3063 for (i = 0; i < bitsize; i += value_bit)
3065 int ibase;
3066 if (WORDS_BIG_ENDIAN)
3067 ibase = bitsize - 1 - i;
3068 else
3069 ibase = i;
3070 *vp++ = tmp[ibase / 32] >> i % 32;
3073 /* It shouldn't matter what's done here, so fill it with
3074 zero. */
3075 for (; i < elem_bitsize; i += value_bit)
3076 *vp++ = 0;
3078 else
3079 abort ();
3080 break;
3082 default:
3083 abort ();
3087 /* Now, pick the right byte to start with. */
3088 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3089 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3090 will already have offset 0. */
3091 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3093 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3094 - byte);
3095 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3096 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3097 byte = (subword_byte % UNITS_PER_WORD
3098 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
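/* E.g. (illustrative): on a fully big-endian machine with 4-byte
   words, taking a 4-byte result at byte 0 of an 8-byte value
   renumbers BYTE to 4, i.e. the least-significant word.  */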
3101 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3102 so if it's become negative it will instead be very large.) */
3103 if (byte >= GET_MODE_SIZE (innermode))
3104 abort ();
3106 /* Convert from bytes to chunks of size value_bit. */
3107 value_start = byte * (BITS_PER_UNIT / value_bit);
3109 /* Re-pack the value. */
3111 if (VECTOR_MODE_P (outermode))
3113 num_elem = GET_MODE_NUNITS (outermode);
3114 result_v = rtvec_alloc (num_elem);
3115 elems = &RTVEC_ELT (result_v, 0);
3116 outer_submode = GET_MODE_INNER (outermode);
3118 else
3120 num_elem = 1;
3121 elems = &result_s;
3122 outer_submode = outermode;
3125 outer_class = GET_MODE_CLASS (outer_submode);
3126 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3128 if (elem_bitsize % value_bit != 0)
3129 abort ();
3130 if (elem_bitsize + value_start * value_bit > max_bitsize)
3131 abort ();
3133 for (elem = 0; elem < num_elem; elem++)
3135 unsigned char *vp;
3137 /* Vectors are stored in target memory order. (This is probably
3138 a mistake.) */
3140 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3141 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3142 / BITS_PER_UNIT);
3143 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3144 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3145 unsigned bytele = (subword_byte % UNITS_PER_WORD
3146 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3147 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3150 switch (outer_class)
3152 case MODE_INT:
3153 case MODE_PARTIAL_INT:
3155 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3157 for (i = 0;
3158 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3159 i += value_bit)
3160 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3161 for (; i < elem_bitsize; i += value_bit)
3162 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3163 << (i - HOST_BITS_PER_WIDE_INT));
3165 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3166 know why. */
3167 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3168 elems[elem] = gen_int_mode (lo, outer_submode);
3169 else
3170 elems[elem] = immed_double_const (lo, hi, outer_submode);
3172 break;
3174 case MODE_FLOAT:
3176 REAL_VALUE_TYPE r;
3177 long tmp[max_bitsize / 32];
3179 /* real_from_target wants its input in words affected by
3180 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3181 and use WORDS_BIG_ENDIAN instead; see the documentation
3182 of SUBREG in rtl.texi. */
3183 for (i = 0; i < max_bitsize / 32; i++)
3184 tmp[i] = 0;
3185 for (i = 0; i < elem_bitsize; i += value_bit)
3187 int ibase;
3188 if (WORDS_BIG_ENDIAN)
3189 ibase = elem_bitsize - 1 - i;
3190 else
3191 ibase = i;
3192 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3195 real_from_target (&r, tmp, outer_submode);
3196 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3198 break;
3200 default:
3201 abort ();
3204 if (VECTOR_MODE_P (outermode))
3205 return gen_rtx_CONST_VECTOR (outermode, result_v);
3206 else
3207 return result_s;
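/* A minimal standalone sketch (not part of the original file) of the
   unpack/select/repack scheme implemented above, for a host integer
   and an already little-endian-renumbered BYTE; the caller must
   ensure BYTE + OUTER_BYTES stays within the value.  */

static unsigned long
subreg_of_ulong (unsigned long op, unsigned int byte,
                 unsigned int outer_bytes)
{
  unsigned char buf[sizeof (unsigned long)];
  unsigned long result = 0;
  unsigned int i;

  for (i = 0; i < sizeof buf; i++)      /* unpack, LSB first */
    buf[i] = (op >> (8 * i)) & 0xff;
  for (i = 0; i < outer_bytes; i++)     /* repack, starting at BYTE */
    result |= (unsigned long) buf[byte + i] << (8 * i);
  return result;
}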
3210 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3211 Return 0 if no simplifications are possible. */
3213 simplify_subreg (enum machine_mode outermode, rtx op,
3214 enum machine_mode innermode, unsigned int byte)
3216 /* Little bit of sanity checking. */
3217 if (innermode == VOIDmode || outermode == VOIDmode
3218 || innermode == BLKmode || outermode == BLKmode)
3219 abort ();
3221 if (GET_MODE (op) != innermode
3222 && GET_MODE (op) != VOIDmode)
3223 abort ();
3225 if (byte % GET_MODE_SIZE (outermode)
3226 || byte >= GET_MODE_SIZE (innermode))
3227 abort ();
3229 if (outermode == innermode && !byte)
3230 return op;
3232 if (GET_CODE (op) == CONST_INT
3233 || GET_CODE (op) == CONST_DOUBLE
3234 || GET_CODE (op) == CONST_VECTOR)
3235 return simplify_immed_subreg (outermode, op, innermode, byte);
3237 /* Changing mode twice with SUBREG => just change it once,
3238 or not at all if changing back to op's starting mode. */
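/* E.g. (illustrative): (subreg:QI (subreg:HI (reg:SI) 0) 0) becomes
   (subreg:QI (reg:SI) 0), and (subreg:SI (subreg:HI (reg:SI) 0) 0)
   collapses to (reg:SI) outright.  */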
3239 if (GET_CODE (op) == SUBREG)
3241 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3242 int final_offset = byte + SUBREG_BYTE (op);
3243 rtx new;
3245 if (outermode == innermostmode
3246 && byte == 0 && SUBREG_BYTE (op) == 0)
3247 return SUBREG_REG (op);
3249 /* The SUBREG_BYTE represents an offset, as if the value were stored
3250 in memory. The irritating exception is the paradoxical subreg,
3251 where we define SUBREG_BYTE to be 0; on big-endian machines this
3252 value would otherwise be negative. For a moment, undo this exception. */
3253 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3255 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3256 if (WORDS_BIG_ENDIAN)
3257 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3258 if (BYTES_BIG_ENDIAN)
3259 final_offset += difference % UNITS_PER_WORD;
3261 if (SUBREG_BYTE (op) == 0
3262 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3264 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3265 if (WORDS_BIG_ENDIAN)
3266 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3267 if (BYTES_BIG_ENDIAN)
3268 final_offset += difference % UNITS_PER_WORD;
3271 /* See whether resulting subreg will be paradoxical. */
3272 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3274 /* In nonparadoxical subregs we can't handle negative offsets. */
3275 if (final_offset < 0)
3276 return NULL_RTX;
3277 /* Bail out in case resulting subreg would be incorrect. */
3278 if (final_offset % GET_MODE_SIZE (outermode)
3279 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3280 return NULL_RTX;
3282 else
3284 int offset = 0;
3285 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3287 /* In a paradoxical subreg, see if we are still looking at the lower part.
3288 If so, our SUBREG_BYTE will be 0. */
3289 if (WORDS_BIG_ENDIAN)
3290 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3291 if (BYTES_BIG_ENDIAN)
3292 offset += difference % UNITS_PER_WORD;
3293 if (offset == final_offset)
3294 final_offset = 0;
3295 else
3296 return NULL_RTX;
3299 /* Recurse for further possible simplifications. */
3300 new = simplify_subreg (outermode, SUBREG_REG (op),
3301 GET_MODE (SUBREG_REG (op)),
3302 final_offset);
3303 if (new)
3304 return new;
3305 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3308 /* SUBREG of a hard register => just change the register number
3309 and/or mode. If the hard register is not valid in that mode,
3310 suppress this simplification. If the hard register is the stack,
3311 frame, or argument pointer, leave this as a SUBREG. */
3313 if (REG_P (op)
3314 && (! REG_FUNCTION_VALUE_P (op)
3315 || ! rtx_equal_function_value_matters)
3316 && REGNO (op) < FIRST_PSEUDO_REGISTER
3317 #ifdef CANNOT_CHANGE_MODE_CLASS
3318 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3319 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3320 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3321 #endif
3322 && ((reload_completed && !frame_pointer_needed)
3323 || (REGNO (op) != FRAME_POINTER_REGNUM
3324 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3325 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3326 #endif
3328 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3329 && REGNO (op) != ARG_POINTER_REGNUM
3330 #endif
3331 && REGNO (op) != STACK_POINTER_REGNUM
3332 && subreg_offset_representable_p (REGNO (op), innermode,
3333 byte, outermode))
3335 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3336 int final_regno = subreg_hard_regno (tem, 0);
3338 /* ??? We do allow it if the current REG is not valid for
3339 its mode. This is a kludge to work around how float/complex
3340 arguments are passed on 32-bit SPARC and should be fixed. */
3341 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3342 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3344 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3346 /* Propagate the original regno. We don't have any way to specify
3347 the offset inside the original regno, so do so only for the lowpart.
3348 The information is used only by alias analysis, which cannot
3349 grok a partial register anyway. */
3351 if (subreg_lowpart_offset (outermode, innermode) == byte)
3352 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3353 return x;
3357 /* If we have a SUBREG of a register that we are replacing and we are
3358 replacing it with a MEM, make a new MEM and try replacing the
3359 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3360 or if we would be widening it. */
3362 if (GET_CODE (op) == MEM
3363 && ! mode_dependent_address_p (XEXP (op, 0))
3364 /* Allow splitting of volatile memory references in case we don't
3365 have an instruction to move the whole thing. */
3366 && (! MEM_VOLATILE_P (op)
3367 || ! have_insn_for (SET, innermode))
3368 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3369 return adjust_address_nv (op, outermode, byte);
3371 /* Handle complex values represented as CONCAT
3372 of real and imaginary part. */
3373 if (GET_CODE (op) == CONCAT)
3375 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3376 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3377 unsigned int final_offset;
3378 rtx res;
3380 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3381 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3382 if (res)
3383 return res;
3384 /* We can at least simplify it by referring directly to the
3385 relevant part. */
3386 return gen_rtx_SUBREG (outermode, part, final_offset);
3389 /* Optimize SUBREG truncations of zero and sign extended values. */
3390 if ((GET_CODE (op) == ZERO_EXTEND
3391 || GET_CODE (op) == SIGN_EXTEND)
3392 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3394 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3396 /* If we're requesting the lowpart of a zero or sign extension,
3397 there are three possibilities. If the outermode is the same
3398 as the origmode, we can omit both the extension and the subreg.
3399 If the outermode is not larger than the origmode, we can apply
3400 the truncation without the extension. Finally, if the outermode
3401 is larger than the origmode, but both are integer modes, we
3402 can just extend to the appropriate mode. */
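/* E.g. (illustrative, lowpart cases): with op ==
   (zero_extend:DI (reg:QI x)), a QImode subreg is just (reg:QI x),
   an HImode subreg becomes (zero_extend:HI (reg:QI x)), and an
   SImode subreg becomes (zero_extend:SI (reg:QI x)).  */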
3403 if (bitpos == 0)
3405 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3406 if (outermode == origmode)
3407 return XEXP (op, 0);
3408 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3409 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3410 subreg_lowpart_offset (outermode,
3411 origmode));
3412 if (SCALAR_INT_MODE_P (outermode))
3413 return simplify_gen_unary (GET_CODE (op), outermode,
3414 XEXP (op, 0), origmode);
3417 /* A SUBREG resulting from a zero extension may fold to zero if
3418 it extracts higher bits than the ZERO_EXTEND's source provides. */
3419 if (GET_CODE (op) == ZERO_EXTEND
3420 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3421 return CONST0_RTX (outermode);
3424 return NULL_RTX;
3427 /* Make a SUBREG operation or equivalent if it folds. */
3430 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3431 enum machine_mode innermode, unsigned int byte)
3433 rtx new;
3434 /* Little bit of sanity checking. */
3435 if (innermode == VOIDmode || outermode == VOIDmode
3436 || innermode == BLKmode || outermode == BLKmode)
3437 abort ();
3439 if (GET_MODE (op) != innermode
3440 && GET_MODE (op) != VOIDmode)
3441 abort ();
3443 if (byte % GET_MODE_SIZE (outermode)
3444 || byte >= GET_MODE_SIZE (innermode))
3445 abort ();
3447 if (GET_CODE (op) == QUEUED)
3448 return NULL_RTX;
3450 new = simplify_subreg (outermode, op, innermode, byte);
3451 if (new)
3452 return new;
3454 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3455 return NULL_RTX;
3457 return gen_rtx_SUBREG (outermode, op, byte);
3459 /* Simplify X, an rtx expression.
3461 Return the simplified expression or NULL if no simplifications
3462 were possible.
3464 This is the preferred entry point into the simplification routines;
3465 however, we still allow passes to call the more specific routines.
3467 Right now GCC has three (yes, three) major bodies of RTL simplification
3468 code that need to be unified.
3470 1. fold_rtx in cse.c. This code uses various CSE specific
3471 information to aid in RTL simplification.
3473 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3474 it uses combine specific information to aid in RTL
3475 simplification.
3477 3. The routines in this file.
3480 Long term we want to only have one body of simplification code; to
3481 get to that state I recommend the following steps:
3483 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3484 which do not depend on pass-specific state into these routines.
3486 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3487 use this routine whenever possible.
3489 3. Allow for pass dependent state to be provided to these
3490 routines and add simplifications based on the pass dependent
3491 state. Remove code from cse.c & combine.c that becomes
3492 redundant/dead.
3494 It will take time, but ultimately the compiler will be easier to
3495 maintain and improve. It's totally silly that when we add a
3496 simplification it needs to be added in four places (three for RTL
3497 simplification and one for tree simplification). */
3500 simplify_rtx (rtx x)
3502 enum rtx_code code = GET_CODE (x);
3503 enum machine_mode mode = GET_MODE (x);
3504 rtx temp;
3506 switch (GET_RTX_CLASS (code))
3508 case '1':
3509 return simplify_unary_operation (code, mode,
3510 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3511 case 'c':
3512 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3513 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3515 /* Fall through.... */
3517 case '2':
3518 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3520 case '3':
3521 case 'b':
3522 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3523 XEXP (x, 0), XEXP (x, 1),
3524 XEXP (x, 2));
3526 case '<':
3527 temp = simplify_relational_operation (code,
3528 ((GET_MODE (XEXP (x, 0))
3529 != VOIDmode)
3530 ? GET_MODE (XEXP (x, 0))
3531 : GET_MODE (XEXP (x, 1))),
3532 XEXP (x, 0), XEXP (x, 1));
3533 #ifdef FLOAT_STORE_FLAG_VALUE
3534 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3536 if (temp == const0_rtx)
3537 temp = CONST0_RTX (mode);
3538 else
3539 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3540 mode);
3542 #endif
3543 return temp;
3545 case 'x':
3546 if (code == SUBREG)
3547 return simplify_gen_subreg (mode, SUBREG_REG (x),
3548 GET_MODE (SUBREG_REG (x)),
3549 SUBREG_BYTE (x));
3550 if (code == CONSTANT_P_RTX)
3552 if (CONSTANT_P (XEXP (x, 0)))
3553 return const1_rtx;
3555 break;
3557 case 'o':
3558 if (code == LO_SUM)
3560 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3561 if (GET_CODE (XEXP (x, 0)) == HIGH
3562 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3563 return XEXP (x, 1);
3565 break;
3567 default:
3568 break;
3570 return NULL;
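/* Illustrative usage sketch (not part of the original file): folding
   a trivially constant PLUS through the generic entry point.  */

static rtx
example_fold_plus_rtx (void)
{
  /* (plus:SI (const_int 1) (const_int 1)) simplifies to
     (const_int 2).  */
  return simplify_rtx (gen_rtx_PLUS (SImode, const1_rtx, const1_rtx));
}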