contrib/gcc-3.4/gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "flags.h"
33 #include "real.h"
34 #include "insn-config.h"
35 #include "recog.h"
36 #include "function.h"
37 #include "expr.h"
38 #include "toplev.h"
39 #include "output.h"
40 #include "ggc.h"
41 #include "target.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
48 signed wide int. */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
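/* For example, with a 32-bit HOST_WIDE_INT (an illustrative width),
   the pair for -2 has low = 0xfffffffe and
   high = HWI_SIGN_EXTEND (0xfffffffe) = -1, while the pair for 2 has
   low = 2 and high = 0.  */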
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
55 rtx, int);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
57 unsigned int);
58 static bool associative_constant_p (rtx);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
64 static rtx
65 neg_const_int (enum machine_mode mode, rtx i)
67 return gen_int_mode (- INTVAL (i), mode);
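/* For example, negating the most negative QImode value (-128)
   overflows to 128, which gen_int_mode truncates back to -128.  */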
71 /* Make a binary operation by properly ordering the operands and
72 seeing if the expression folds. */
74 rtx
75 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
76 rtx op1)
78 rtx tem;
80 /* Put complex operands first and constants second if commutative. */
81 if (GET_RTX_CLASS (code) == 'c'
82 && swap_commutative_operands_p (op0, op1))
83 tem = op0, op0 = op1, op1 = tem;
85 /* If this simplifies, do it. */
86 tem = simplify_binary_operation (code, mode, op0, op1);
87 if (tem)
88 return tem;
90 /* Handle addition and subtraction specially. Otherwise, just form
91 the operation. */
93 if (code == PLUS || code == MINUS)
95 tem = simplify_plus_minus (code, mode, op0, op1, 1);
96 if (tem)
97 return tem;
100 return gen_rtx_fmt_ee (code, mode, op0, op1);
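/* Example of a hypothetical caller: with two constant operands the
   operation folds immediately, so
     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
   yields (const_int 5) rather than a PLUS expression.  */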
103 /* If X is a MEM referencing the constant pool, return the real value.
104 Otherwise return X. */
105 rtx
106 avoid_constant_pool_reference (rtx x)
108 rtx c, tmp, addr;
109 enum machine_mode cmode;
111 switch (GET_CODE (x))
113 case MEM:
114 break;
116 case FLOAT_EXTEND:
117 /* Handle float extensions of constant pool references. */
118 tmp = XEXP (x, 0);
119 c = avoid_constant_pool_reference (tmp);
120 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
122 REAL_VALUE_TYPE d;
124 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
125 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
127 return x;
129 default:
130 return x;
133 addr = XEXP (x, 0);
135 /* Call target hook to avoid the effects of -fpic etc.... */
136 addr = (*targetm.delegitimize_address) (addr);
138 if (GET_CODE (addr) == LO_SUM)
139 addr = XEXP (addr, 1);
141 if (GET_CODE (addr) != SYMBOL_REF
142 || ! CONSTANT_POOL_ADDRESS_P (addr))
143 return x;
145 c = get_pool_constant (addr);
146 cmode = get_pool_mode (addr);
148 /* If we're accessing the constant in a different mode than it was
149 originally stored, attempt to fix that up via subreg simplifications.
150 If that fails we have no choice but to return the original memory. */
151 if (cmode != GET_MODE (x))
153 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
154 return c ? c : x;
157 return c;
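/* Example: if X is a MEM whose address is a constant-pool SYMBOL_REF
   for the SFmode value 1.5, the CONST_DOUBLE for 1.5 is returned;
   any other MEM comes back unchanged.  */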
160 /* Make a unary operation by first seeing if it folds and otherwise making
161 the specified operation. */
163 rtx
164 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
165 enum machine_mode op_mode)
167 rtx tem;
169 /* If this simplifies, use it. */
170 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
171 return tem;
173 return gen_rtx_fmt_e (code, mode, op);
176 /* Likewise for ternary operations. */
178 rtx
179 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
180 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
182 rtx tem;
184 /* If this simplifies, use it. */
185 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
186 op0, op1, op2)))
187 return tem;
189 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
192 /* Return true if X is a MEM referencing the constant pool. */
194 bool
195 constant_pool_reference_p (rtx x)
197 return avoid_constant_pool_reference (x) != x;
200 /* Likewise, for relational operations.
201 CMP_MODE specifies mode comparison is done in.
202 */
204 rtx
205 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
206 enum machine_mode cmp_mode, rtx op0, rtx op1)
208 rtx tem;
210 if (cmp_mode == VOIDmode)
211 cmp_mode = GET_MODE (op0);
212 if (cmp_mode == VOIDmode)
213 cmp_mode = GET_MODE (op1);
215 if (cmp_mode != VOIDmode
216 && ! VECTOR_MODE_P (mode))
218 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
220 if (tem)
222 #ifdef FLOAT_STORE_FLAG_VALUE
223 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
225 REAL_VALUE_TYPE val;
226 if (tem == const0_rtx)
227 return CONST0_RTX (mode);
228 if (tem != const_true_rtx)
229 abort ();
230 val = FLOAT_STORE_FLAG_VALUE (mode);
231 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
233 #endif
234 return tem;
238 /* For the following tests, ensure const0_rtx is op1. */
239 if (swap_commutative_operands_p (op0, op1)
240 || (op0 == const0_rtx && op1 != const0_rtx))
241 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
243 /* If op0 is a compare, extract the comparison arguments from it. */
244 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
245 return simplify_gen_relational (code, mode, VOIDmode,
246 XEXP (op0, 0), XEXP (op0, 1));
248 /* If op0 is a comparison, extract the comparison arguments from it. */
249 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
251 if (code == NE)
253 if (GET_MODE (op0) == mode)
254 return op0;
255 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
256 XEXP (op0, 0), XEXP (op0, 1));
258 else if (code == EQ)
260 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
261 if (new != UNKNOWN)
262 return simplify_gen_relational (new, mode, VOIDmode,
263 XEXP (op0, 0), XEXP (op0, 1));
267 return gen_rtx_fmt_ee (code, mode, op0, op1);
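/* Example: (ne:SI (eq:SI x y) (const_int 0)) collapses to
   (eq:SI x y) via the NE case above, since the inner comparison
   already has the requested mode.  */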
270 /* Replace all occurrences of OLD in X with NEW and try to simplify the
271 resulting RTX. Return a new RTX which is as simplified as possible. */
273 rtx
274 simplify_replace_rtx (rtx x, rtx old, rtx new)
276 enum rtx_code code = GET_CODE (x);
277 enum machine_mode mode = GET_MODE (x);
278 enum machine_mode op_mode;
279 rtx op0, op1, op2;
281 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
282 to build a new expression substituting recursively. If we can't do
283 anything, return our input. */
285 if (x == old)
286 return new;
288 switch (GET_RTX_CLASS (code))
290 case '1':
291 op0 = XEXP (x, 0);
292 op_mode = GET_MODE (op0);
293 op0 = simplify_replace_rtx (op0, old, new);
294 if (op0 == XEXP (x, 0))
295 return x;
296 return simplify_gen_unary (code, mode, op0, op_mode);
298 case '2':
299 case 'c':
300 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
301 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
302 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return x;
304 return simplify_gen_binary (code, mode, op0, op1);
306 case '<':
307 op0 = XEXP (x, 0);
308 op1 = XEXP (x, 1);
309 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
310 op0 = simplify_replace_rtx (op0, old, new);
311 op1 = simplify_replace_rtx (op1, old, new);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
313 return x;
314 return simplify_gen_relational (code, mode, op_mode, op0, op1);
316 case '3':
317 case 'b':
318 op0 = XEXP (x, 0);
319 op_mode = GET_MODE (op0);
320 op0 = simplify_replace_rtx (op0, old, new);
321 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
322 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
323 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
324 return x;
325 if (op_mode == VOIDmode)
326 op_mode = GET_MODE (op0);
327 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
329 case 'x':
330 /* The only case we try to handle is a SUBREG. */
331 if (code == SUBREG)
333 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
334 if (op0 == SUBREG_REG (x))
335 return x;
336 op0 = simplify_gen_subreg (GET_MODE (x), op0,
337 GET_MODE (SUBREG_REG (x)),
338 SUBREG_BYTE (x));
339 return op0 ? op0 : x;
341 break;
343 case 'o':
344 if (code == MEM)
346 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
347 if (op0 == XEXP (x, 0))
348 return x;
349 return replace_equiv_address_nv (x, op0);
351 else if (code == LO_SUM)
353 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
354 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
356 /* (lo_sum (high x) x) -> x */
357 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
358 return op1;
360 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
361 return x;
362 return gen_rtx_LO_SUM (mode, op0, op1);
364 else if (code == REG)
366 if (REG_P (old) && REGNO (x) == REGNO (old))
367 return new;
369 break;
371 default:
372 break;
374 return x;
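/* Example (hypothetical): replacing (reg 100) with (const_int 0) in
   (plus:SI (reg 100) (reg 101)) rebuilds both operands, and the PLUS
   then folds away, leaving just (reg 101).  */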
377 /* Try to simplify a unary operation CODE whose output mode is to be
378 MODE with input operand OP whose mode was originally OP_MODE.
379 Return zero if no simplification can be made. */
380 rtx
381 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
382 rtx op, enum machine_mode op_mode)
384 unsigned int width = GET_MODE_BITSIZE (mode);
385 rtx trueop = avoid_constant_pool_reference (op);
387 if (code == VEC_DUPLICATE)
389 if (!VECTOR_MODE_P (mode))
390 abort ();
391 if (GET_MODE (trueop) != VOIDmode
392 && !VECTOR_MODE_P (GET_MODE (trueop))
393 && GET_MODE_INNER (mode) != GET_MODE (trueop))
394 abort ();
395 if (GET_MODE (trueop) != VOIDmode
396 && VECTOR_MODE_P (GET_MODE (trueop))
397 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
398 abort ();
399 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
400 || GET_CODE (trueop) == CONST_VECTOR)
402 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
403 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
404 rtvec v = rtvec_alloc (n_elts);
405 unsigned int i;
407 if (GET_CODE (trueop) != CONST_VECTOR)
408 for (i = 0; i < n_elts; i++)
409 RTVEC_ELT (v, i) = trueop;
410 else
412 enum machine_mode inmode = GET_MODE (trueop);
413 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
414 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
416 if (in_n_elts >= n_elts || n_elts % in_n_elts)
417 abort ();
418 for (i = 0; i < n_elts; i++)
419 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
421 return gen_rtx_CONST_VECTOR (mode, v);
424 else if (GET_CODE (op) == CONST)
425 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
427 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
429 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
430 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
431 enum machine_mode opmode = GET_MODE (trueop);
432 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
433 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
434 rtvec v = rtvec_alloc (n_elts);
435 unsigned int i;
437 if (op_n_elts != n_elts)
438 abort ();
440 for (i = 0; i < n_elts; i++)
442 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
443 CONST_VECTOR_ELT (trueop, i),
444 GET_MODE_INNER (opmode));
445 if (!x)
446 return 0;
447 RTVEC_ELT (v, i) = x;
449 return gen_rtx_CONST_VECTOR (mode, v);
452 /* The order of these tests is critical so that, for example, we don't
453 check the wrong mode (input vs. output) for a conversion operation,
454 such as FIX. At some point, this should be simplified. */
456 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
457 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
459 HOST_WIDE_INT hv, lv;
460 REAL_VALUE_TYPE d;
462 if (GET_CODE (trueop) == CONST_INT)
463 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
464 else
465 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
467 REAL_VALUE_FROM_INT (d, lv, hv, mode);
468 d = real_value_truncate (mode, d);
469 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
471 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
472 && (GET_CODE (trueop) == CONST_DOUBLE
473 || GET_CODE (trueop) == CONST_INT))
475 HOST_WIDE_INT hv, lv;
476 REAL_VALUE_TYPE d;
478 if (GET_CODE (trueop) == CONST_INT)
479 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
480 else
481 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
483 if (op_mode == VOIDmode)
485 /* We don't know how to interpret negative-looking numbers in
486 this case, so don't try to fold those. */
487 if (hv < 0)
488 return 0;
490 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
491 ;
492 else
493 hv = 0, lv &= GET_MODE_MASK (op_mode);
495 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
496 d = real_value_truncate (mode, d);
497 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
500 if (GET_CODE (trueop) == CONST_INT
501 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
503 HOST_WIDE_INT arg0 = INTVAL (trueop);
504 HOST_WIDE_INT val;
506 switch (code)
508 case NOT:
509 val = ~ arg0;
510 break;
512 case NEG:
513 val = - arg0;
514 break;
516 case ABS:
517 val = (arg0 >= 0 ? arg0 : - arg0);
518 break;
520 case FFS:
521 /* Don't use ffs here. Instead, get low order bit and then its
522 number. If arg0 is zero, this will return 0, as desired. */
523 arg0 &= GET_MODE_MASK (mode);
524 val = exact_log2 (arg0 & (- arg0)) + 1;
525 break;
527 case CLZ:
528 arg0 &= GET_MODE_MASK (mode);
529 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
530 ;
531 else
532 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
533 break;
535 case CTZ:
536 arg0 &= GET_MODE_MASK (mode);
537 if (arg0 == 0)
539 /* Even if the value at zero is undefined, we have to come
540 up with some replacement. Seems good enough. */
541 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
542 val = GET_MODE_BITSIZE (mode);
544 else
545 val = exact_log2 (arg0 & -arg0);
546 break;
548 case POPCOUNT:
549 arg0 &= GET_MODE_MASK (mode);
550 val = 0;
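/* arg0 &= arg0 - 1 clears the lowest set bit, so the loop
   iterates once per set bit.  */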
551 while (arg0)
552 val++, arg0 &= arg0 - 1;
553 break;
555 case PARITY:
556 arg0 &= GET_MODE_MASK (mode);
557 val = 0;
558 while (arg0)
559 val++, arg0 &= arg0 - 1;
560 val &= 1;
561 break;
563 case TRUNCATE:
564 val = arg0;
565 break;
567 case ZERO_EXTEND:
568 /* When zero-extending a CONST_INT, we need to know its
569 original mode. */
570 if (op_mode == VOIDmode)
571 abort ();
572 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
574 /* If we were really extending the mode,
575 we would have to distinguish between zero-extension
576 and sign-extension. */
577 if (width != GET_MODE_BITSIZE (op_mode))
578 abort ();
579 val = arg0;
581 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
582 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
583 else
584 return 0;
585 break;
587 case SIGN_EXTEND:
588 if (op_mode == VOIDmode)
589 op_mode = mode;
590 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
592 /* If we were really extending the mode,
593 we would have to distinguish between zero-extension
594 and sign-extension. */
595 if (width != GET_MODE_BITSIZE (op_mode))
596 abort ();
597 val = arg0;
599 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
601 val
602 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
603 if (val
604 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
605 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
607 else
608 return 0;
609 break;
611 case SQRT:
612 case FLOAT_EXTEND:
613 case FLOAT_TRUNCATE:
614 case SS_TRUNCATE:
615 case US_TRUNCATE:
616 return 0;
618 default:
619 abort ();
622 val = trunc_int_for_mode (val, mode);
624 return GEN_INT (val);
627 /* We can do some operations on integer CONST_DOUBLEs. Also allow
628 for a DImode operation on a CONST_INT. */
629 else if (GET_MODE (trueop) == VOIDmode
630 && width <= HOST_BITS_PER_WIDE_INT * 2
631 && (GET_CODE (trueop) == CONST_DOUBLE
632 || GET_CODE (trueop) == CONST_INT))
634 unsigned HOST_WIDE_INT l1, lv;
635 HOST_WIDE_INT h1, hv;
637 if (GET_CODE (trueop) == CONST_DOUBLE)
638 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
639 else
640 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
642 switch (code)
644 case NOT:
645 lv = ~ l1;
646 hv = ~ h1;
647 break;
649 case NEG:
650 neg_double (l1, h1, &lv, &hv);
651 break;
653 case ABS:
654 if (h1 < 0)
655 neg_double (l1, h1, &lv, &hv);
656 else
657 lv = l1, hv = h1;
658 break;
660 case FFS:
661 hv = 0;
662 if (l1 == 0)
664 if (h1 == 0)
665 lv = 0;
666 else
667 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
669 else
670 lv = exact_log2 (l1 & -l1) + 1;
671 break;
673 case CLZ:
674 hv = 0;
675 if (h1 != 0)
676 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
677 - HOST_BITS_PER_WIDE_INT;
678 else if (l1 != 0)
679 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
680 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
681 lv = GET_MODE_BITSIZE (mode);
682 break;
684 case CTZ:
685 hv = 0;
686 if (l1 != 0)
687 lv = exact_log2 (l1 & -l1);
688 else if (h1 != 0)
689 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
690 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
691 lv = GET_MODE_BITSIZE (mode);
692 break;
694 case POPCOUNT:
695 hv = 0;
696 lv = 0;
697 while (l1)
698 lv++, l1 &= l1 - 1;
699 while (h1)
700 lv++, h1 &= h1 - 1;
701 break;
703 case PARITY:
704 hv = 0;
705 lv = 0;
706 while (l1)
707 lv++, l1 &= l1 - 1;
708 while (h1)
709 lv++, h1 &= h1 - 1;
710 lv &= 1;
711 break;
713 case TRUNCATE:
714 /* This is just a change-of-mode, so do nothing. */
715 lv = l1, hv = h1;
716 break;
718 case ZERO_EXTEND:
719 if (op_mode == VOIDmode)
720 abort ();
722 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
723 return 0;
725 hv = 0;
726 lv = l1 & GET_MODE_MASK (op_mode);
727 break;
729 case SIGN_EXTEND:
730 if (op_mode == VOIDmode
731 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
732 return 0;
733 else
735 lv = l1 & GET_MODE_MASK (op_mode);
736 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
737 && (lv & ((HOST_WIDE_INT) 1
738 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
739 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
741 hv = HWI_SIGN_EXTEND (lv);
743 break;
745 case SQRT:
746 return 0;
748 default:
749 return 0;
752 return immed_double_const (lv, hv, mode);
755 else if (GET_CODE (trueop) == CONST_DOUBLE
756 && GET_MODE_CLASS (mode) == MODE_FLOAT)
758 REAL_VALUE_TYPE d, t;
759 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
761 switch (code)
763 case SQRT:
764 if (HONOR_SNANS (mode) && real_isnan (&d))
765 return 0;
766 real_sqrt (&t, mode, &d);
767 d = t;
768 break;
769 case ABS:
770 d = REAL_VALUE_ABS (d);
771 break;
772 case NEG:
773 d = REAL_VALUE_NEGATE (d);
774 break;
775 case FLOAT_TRUNCATE:
776 d = real_value_truncate (mode, d);
777 break;
778 case FLOAT_EXTEND:
779 /* All this does is change the mode. */
780 break;
781 case FIX:
782 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
783 break;
784 case NOT:
786 long tmp[4];
787 int i;
789 real_to_target (tmp, &d, GET_MODE (trueop));
790 for (i = 0; i < 4; i++)
791 tmp[i] = ~tmp[i];
792 real_from_target (&d, tmp, mode);
793 break;
795 default:
796 abort ();
798 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
801 else if (GET_CODE (trueop) == CONST_DOUBLE
802 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
803 && GET_MODE_CLASS (mode) == MODE_INT
804 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
806 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
807 operators are intentionally left unspecified (to ease implementation
808 by target backends), for consistency, this routine implements the
809 same semantics for constant folding as used by the middle-end. */
811 HOST_WIDE_INT xh, xl, th, tl;
812 REAL_VALUE_TYPE x, t;
813 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
814 switch (code)
816 case FIX:
817 if (REAL_VALUE_ISNAN (x))
818 return const0_rtx;
820 /* Test against the signed upper bound. */
821 if (width > HOST_BITS_PER_WIDE_INT)
823 th = ((unsigned HOST_WIDE_INT) 1
824 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
825 tl = -1;
827 else
829 th = 0;
830 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
832 real_from_integer (&t, VOIDmode, tl, th, 0);
833 if (REAL_VALUES_LESS (t, x))
835 xh = th;
836 xl = tl;
837 break;
840 /* Test against the signed lower bound. */
841 if (width > HOST_BITS_PER_WIDE_INT)
843 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
844 tl = 0;
846 else
848 th = -1;
849 tl = (HOST_WIDE_INT) -1 << (width - 1);
851 real_from_integer (&t, VOIDmode, tl, th, 0);
852 if (REAL_VALUES_LESS (x, t))
854 xh = th;
855 xl = tl;
856 break;
858 REAL_VALUE_TO_INT (&xl, &xh, x);
859 break;
861 case UNSIGNED_FIX:
862 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
863 return const0_rtx;
865 /* Test against the unsigned upper bound. */
866 if (width == 2*HOST_BITS_PER_WIDE_INT)
868 th = -1;
869 tl = -1;
871 else if (width >= HOST_BITS_PER_WIDE_INT)
873 th = ((unsigned HOST_WIDE_INT) 1
874 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
875 tl = -1;
877 else
879 th = 0;
880 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
882 real_from_integer (&t, VOIDmode, tl, th, 1);
883 if (REAL_VALUES_LESS (t, x))
885 xh = th;
886 xl = tl;
887 break;
890 REAL_VALUE_TO_INT (&xl, &xh, x);
891 break;
893 default:
894 abort ();
896 return immed_double_const (xl, xh, mode);
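/* For example, folding (fix:QI) of the value 300.0 yields
   (const_int 127): the result is clamped to the signed upper bound
   of QImode before conversion.  */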
899 /* This was formerly used only for non-IEEE float.
900 eggert@twinsun.com says it is safe for IEEE also. */
901 else
903 enum rtx_code reversed;
904 rtx temp;
906 /* There are some simplifications we can do even if the operands
907 aren't constant. */
908 switch (code)
910 case NOT:
911 /* (not (not X)) == X. */
912 if (GET_CODE (op) == NOT)
913 return XEXP (op, 0);
915 /* (not (eq X Y)) == (ne X Y), etc. */
916 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
917 && (mode == BImode || STORE_FLAG_VALUE == -1)
918 && ((reversed = reversed_comparison_code (op, NULL_RTX))
919 != UNKNOWN))
920 return simplify_gen_relational (reversed, mode, VOIDmode,
921 XEXP (op, 0), XEXP (op, 1));
923 /* (not (plus X -1)) can become (neg X). */
924 if (GET_CODE (op) == PLUS
925 && XEXP (op, 1) == constm1_rtx)
926 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
928 /* Similarly, (not (neg X)) is (plus X -1). */
929 if (GET_CODE (op) == NEG)
930 return plus_constant (XEXP (op, 0), -1);
932 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
933 if (GET_CODE (op) == XOR
934 && GET_CODE (XEXP (op, 1)) == CONST_INT
935 && (temp = simplify_unary_operation (NOT, mode,
936 XEXP (op, 1),
937 mode)) != 0)
938 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
941 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
942 operands other than 1, but that is not valid. We could do a
943 similar simplification for (not (lshiftrt C X)) where C is
944 just the sign bit, but this doesn't seem common enough to
945 bother with. */
946 if (GET_CODE (op) == ASHIFT
947 && XEXP (op, 0) == const1_rtx)
949 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
950 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
953 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
954 by reversing the comparison code if valid. */
955 if (STORE_FLAG_VALUE == -1
956 && GET_RTX_CLASS (GET_CODE (op)) == '<'
957 && (reversed = reversed_comparison_code (op, NULL_RTX))
958 != UNKNOWN)
959 return simplify_gen_relational (reversed, mode, VOIDmode,
960 XEXP (op, 0), XEXP (op, 1));
962 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
963 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
964 so we can perform the above simplification. */
966 if (STORE_FLAG_VALUE == -1
967 && GET_CODE (op) == ASHIFTRT
968 && GET_CODE (XEXP (op, 1)) == CONST_INT
969 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
970 return simplify_gen_relational (GE, mode, VOIDmode,
971 XEXP (op, 0), const0_rtx);
973 break;
975 case NEG:
976 /* (neg (neg X)) == X. */
977 if (GET_CODE (op) == NEG)
978 return XEXP (op, 0);
980 /* (neg (plus X 1)) can become (not X). */
981 if (GET_CODE (op) == PLUS
982 && XEXP (op, 1) == const1_rtx)
983 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
985 /* Similarly, (neg (not X)) is (plus X 1). */
986 if (GET_CODE (op) == NOT)
987 return plus_constant (XEXP (op, 0), 1);
989 /* (neg (minus X Y)) can become (minus Y X). This transformation
990 isn't safe for modes with signed zeros, since if X and Y are
991 both +0, (minus Y X) is the same as (minus X Y). If the
992 rounding mode is towards +infinity (or -infinity) then the two
993 expressions will be rounded differently. */
994 if (GET_CODE (op) == MINUS
995 && !HONOR_SIGNED_ZEROS (mode)
996 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
997 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
998 XEXP (op, 0));
1000 if (GET_CODE (op) == PLUS
1001 && !HONOR_SIGNED_ZEROS (mode)
1002 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1004 /* (neg (plus A C)) is simplified to (minus -C A). */
1005 if (GET_CODE (XEXP (op, 1)) == CONST_INT
1006 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
1008 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
1009 mode);
1010 if (temp)
1011 return simplify_gen_binary (MINUS, mode, temp,
1012 XEXP (op, 0));
1015 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1016 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1017 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1020 /* (neg (mult A B)) becomes (mult (neg A) B).
1021 This works even for floating-point values. */
1022 if (GET_CODE (op) == MULT
1023 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1025 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1026 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1029 /* NEG commutes with ASHIFT since it is multiplication. Only do
1030 this if we can then eliminate the NEG (e.g., if the operand
1031 is a constant). */
1032 if (GET_CODE (op) == ASHIFT)
1034 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1035 mode);
1036 if (temp)
1037 return simplify_gen_binary (ASHIFT, mode, temp,
1038 XEXP (op, 1));
1041 break;
1043 case SIGN_EXTEND:
1044 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1045 becomes just the MINUS if its mode is MODE. This allows
1046 folding switch statements on machines using casesi (such as
1047 the VAX). */
1048 if (GET_CODE (op) == TRUNCATE
1049 && GET_MODE (XEXP (op, 0)) == mode
1050 && GET_CODE (XEXP (op, 0)) == MINUS
1051 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1052 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1053 return XEXP (op, 0);
1055 /* Check for a sign extension of a subreg of a promoted
1056 variable, where the promotion is sign-extended, and the
1057 target mode is the same as the variable's promotion. */
1058 if (GET_CODE (op) == SUBREG
1059 && SUBREG_PROMOTED_VAR_P (op)
1060 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1061 && GET_MODE (XEXP (op, 0)) == mode)
1062 return XEXP (op, 0);
1064 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1065 if (! POINTERS_EXTEND_UNSIGNED
1066 && mode == Pmode && GET_MODE (op) == ptr_mode
1067 && (CONSTANT_P (op)
1068 || (GET_CODE (op) == SUBREG
1069 && GET_CODE (SUBREG_REG (op)) == REG
1070 && REG_POINTER (SUBREG_REG (op))
1071 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1072 return convert_memory_address (Pmode, op);
1073 #endif
1074 break;
1076 case ZERO_EXTEND:
1077 /* Check for a zero extension of a subreg of a promoted
1078 variable, where the promotion is zero-extended, and the
1079 target mode is the same as the variable's promotion. */
1080 if (GET_CODE (op) == SUBREG
1081 && SUBREG_PROMOTED_VAR_P (op)
1082 && SUBREG_PROMOTED_UNSIGNED_P (op)
1083 && GET_MODE (XEXP (op, 0)) == mode)
1084 return XEXP (op, 0);
1086 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1087 if (POINTERS_EXTEND_UNSIGNED > 0
1088 && mode == Pmode && GET_MODE (op) == ptr_mode
1089 && (CONSTANT_P (op)
1090 || (GET_CODE (op) == SUBREG
1091 && GET_CODE (SUBREG_REG (op)) == REG
1092 && REG_POINTER (SUBREG_REG (op))
1093 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1094 return convert_memory_address (Pmode, op);
1095 #endif
1096 break;
1098 default:
1099 break;
1102 return 0;
1106 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1107 is a suitable integer or floating point immediate constant. */
1108 static bool
1109 associative_constant_p (rtx op)
1111 if (GET_CODE (op) == CONST_INT
1112 || GET_CODE (op) == CONST_DOUBLE)
1113 return true;
1114 op = avoid_constant_pool_reference (op);
1115 return GET_CODE (op) == CONST_INT
1116 || GET_CODE (op) == CONST_DOUBLE;
1119 /* Subroutine of simplify_binary_operation to simplify an associative
1120 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1121 Return 0 if no simplification is possible. */
1122 static rtx
1123 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1124 rtx op0, rtx op1)
1126 rtx tem;
1128 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1129 if (GET_CODE (op0) == code
1130 && associative_constant_p (op1)
1131 && associative_constant_p (XEXP (op0, 1)))
1133 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1134 if (! tem)
1135 return tem;
1136 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1139 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1140 if (GET_CODE (op0) == code
1141 && GET_CODE (op1) == code
1142 && associative_constant_p (XEXP (op0, 1))
1143 && associative_constant_p (XEXP (op1, 1)))
1145 rtx c = simplify_binary_operation (code, mode,
1146 XEXP (op0, 1), XEXP (op1, 1));
1147 if (! c)
1148 return 0;
1149 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1150 return simplify_gen_binary (code, mode, tem, c);
1153 /* Canonicalize (x op c) op y as (x op y) op c. */
1154 if (GET_CODE (op0) == code
1155 && associative_constant_p (XEXP (op0, 1)))
1157 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1158 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1161 /* Canonicalize x op (y op c) as (x op y) op c. */
1162 if (GET_CODE (op1) == code
1163 && associative_constant_p (XEXP (op1, 1)))
1165 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1166 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1169 return 0;
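/* Example: (plus (plus x (const_int 1)) (const_int 2)) is rewritten
   as (plus x (const_int 3)) by the first transformation above.  */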
1172 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1173 and OP1. Return 0 if no simplification is possible.
1175 Don't use this for relational operations such as EQ or LT.
1176 Use simplify_relational_operation instead. */
1177 rtx
1178 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1179 rtx op0, rtx op1)
1181 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1182 HOST_WIDE_INT val;
1183 unsigned int width = GET_MODE_BITSIZE (mode);
1184 rtx tem;
1185 rtx trueop0 = avoid_constant_pool_reference (op0);
1186 rtx trueop1 = avoid_constant_pool_reference (op1);
1188 /* Relational operations don't work here. We must know the mode
1189 of the operands in order to do the comparison correctly.
1190 Assuming a full word can give incorrect results.
1191 Consider comparing 128 with -128 in QImode. */
1193 if (GET_RTX_CLASS (code) == '<')
1194 abort ();
1196 /* Make sure the constant is second. */
1197 if (GET_RTX_CLASS (code) == 'c'
1198 && swap_commutative_operands_p (trueop0, trueop1))
1200 tem = op0, op0 = op1, op1 = tem;
1201 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1204 if (VECTOR_MODE_P (mode)
1205 && code != VEC_CONCAT
1206 && GET_CODE (trueop0) == CONST_VECTOR
1207 && GET_CODE (trueop1) == CONST_VECTOR)
1209 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1210 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1211 enum machine_mode op0mode = GET_MODE (trueop0);
1212 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1213 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1214 enum machine_mode op1mode = GET_MODE (trueop1);
1215 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1216 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1217 rtvec v = rtvec_alloc (n_elts);
1218 unsigned int i;
1220 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1221 abort ();
1223 for (i = 0; i < n_elts; i++)
1225 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1226 CONST_VECTOR_ELT (trueop0, i),
1227 CONST_VECTOR_ELT (trueop1, i));
1228 if (!x)
1229 return 0;
1230 RTVEC_ELT (v, i) = x;
1233 return gen_rtx_CONST_VECTOR (mode, v);
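/* Example: adding the CONST_VECTORs [1 2] and [10 20] in a
   two-element integer vector mode folds elementwise to [11 22].  */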
1236 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1237 && GET_CODE (trueop0) == CONST_DOUBLE
1238 && GET_CODE (trueop1) == CONST_DOUBLE
1239 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1241 if (code == AND
1242 || code == IOR
1243 || code == XOR)
1245 long tmp0[4];
1246 long tmp1[4];
1247 REAL_VALUE_TYPE r;
1248 int i;
1250 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1251 GET_MODE (op0));
1252 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1253 GET_MODE (op1));
1254 for (i = 0; i < 4; i++)
1256 if (code == AND)
1257 tmp0[i] &= tmp1[i];
1258 else if (code == IOR)
1259 tmp0[i] |= tmp1[i];
1260 else if (code == XOR)
1261 tmp0[i] ^= tmp1[i];
1262 else
1263 abort ();
1265 real_from_target (&r, tmp0, mode);
1266 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1268 else
1270 REAL_VALUE_TYPE f0, f1, value;
1272 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1273 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1274 f0 = real_value_truncate (mode, f0);
1275 f1 = real_value_truncate (mode, f1);
1277 if (HONOR_SNANS (mode)
1278 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1279 return 0;
1281 if (code == DIV
1282 && REAL_VALUES_EQUAL (f1, dconst0)
1283 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1284 return 0;
1286 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1287 && flag_trapping_math
1288 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1290 int s0 = REAL_VALUE_NEGATIVE (f0);
1291 int s1 = REAL_VALUE_NEGATIVE (f1);
1293 switch (code)
1295 case PLUS:
1296 /* Inf + -Inf = NaN plus exception. */
1297 if (s0 != s1)
1298 return 0;
1299 break;
1300 case MINUS:
1301 /* Inf - Inf = NaN plus exception. */
1302 if (s0 == s1)
1303 return 0;
1304 break;
1305 case DIV:
1306 /* Inf / Inf = NaN plus exception. */
1307 return 0;
1308 default:
1309 break;
1313 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1314 && flag_trapping_math
1315 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1316 || (REAL_VALUE_ISINF (f1)
1317 && REAL_VALUES_EQUAL (f0, dconst0))))
1318 /* Inf * 0 = NaN plus exception. */
1319 return 0;
1321 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1323 value = real_value_truncate (mode, value);
1324 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1328 /* We can fold some multi-word operations. */
1329 if (GET_MODE_CLASS (mode) == MODE_INT
1330 && width == HOST_BITS_PER_WIDE_INT * 2
1331 && (GET_CODE (trueop0) == CONST_DOUBLE
1332 || GET_CODE (trueop0) == CONST_INT)
1333 && (GET_CODE (trueop1) == CONST_DOUBLE
1334 || GET_CODE (trueop1) == CONST_INT))
1336 unsigned HOST_WIDE_INT l1, l2, lv;
1337 HOST_WIDE_INT h1, h2, hv;
1339 if (GET_CODE (trueop0) == CONST_DOUBLE)
1340 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1341 else
1342 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1344 if (GET_CODE (trueop1) == CONST_DOUBLE)
1345 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1346 else
1347 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1349 switch (code)
1351 case MINUS:
1352 /* A - B == A + (-B). */
1353 neg_double (l2, h2, &lv, &hv);
1354 l2 = lv, h2 = hv;
1356 /* Fall through.... */
1358 case PLUS:
1359 add_double (l1, h1, l2, h2, &lv, &hv);
1360 break;
1362 case MULT:
1363 mul_double (l1, h1, l2, h2, &lv, &hv);
1364 break;
1366 case DIV: case MOD: case UDIV: case UMOD:
1367 /* We'd need to include tree.h to do this and it doesn't seem worth
1368 it. */
1369 return 0;
1371 case AND:
1372 lv = l1 & l2, hv = h1 & h2;
1373 break;
1375 case IOR:
1376 lv = l1 | l2, hv = h1 | h2;
1377 break;
1379 case XOR:
1380 lv = l1 ^ l2, hv = h1 ^ h2;
1381 break;
1383 case SMIN:
1384 if (h1 < h2
1385 || (h1 == h2
1386 && ((unsigned HOST_WIDE_INT) l1
1387 < (unsigned HOST_WIDE_INT) l2)))
1388 lv = l1, hv = h1;
1389 else
1390 lv = l2, hv = h2;
1391 break;
1393 case SMAX:
1394 if (h1 > h2
1395 || (h1 == h2
1396 && ((unsigned HOST_WIDE_INT) l1
1397 > (unsigned HOST_WIDE_INT) l2)))
1398 lv = l1, hv = h1;
1399 else
1400 lv = l2, hv = h2;
1401 break;
1403 case UMIN:
1404 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1405 || (h1 == h2
1406 && ((unsigned HOST_WIDE_INT) l1
1407 < (unsigned HOST_WIDE_INT) l2)))
1408 lv = l1, hv = h1;
1409 else
1410 lv = l2, hv = h2;
1411 break;
1413 case UMAX:
1414 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1415 || (h1 == h2
1416 && ((unsigned HOST_WIDE_INT) l1
1417 > (unsigned HOST_WIDE_INT) l2)))
1418 lv = l1, hv = h1;
1419 else
1420 lv = l2, hv = h2;
1421 break;
1423 case LSHIFTRT: case ASHIFTRT:
1424 case ASHIFT:
1425 case ROTATE: case ROTATERT:
1426 #ifdef SHIFT_COUNT_TRUNCATED
1427 if (SHIFT_COUNT_TRUNCATED)
1428 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1429 #endif
1431 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1432 return 0;
1434 if (code == LSHIFTRT || code == ASHIFTRT)
1435 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1436 code == ASHIFTRT);
1437 else if (code == ASHIFT)
1438 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1439 else if (code == ROTATE)
1440 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1441 else /* code == ROTATERT */
1442 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1443 break;
1445 default:
1446 return 0;
1449 return immed_double_const (lv, hv, mode);
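/* For example, with a 32-bit HOST_WIDE_INT, adding two DImode
   CONST_DOUBLEs is performed piecewise by add_double on their
   (low, high) word pairs.  */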
1452 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1453 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1455 /* Even if we can't compute a constant result,
1456 there are some cases worth simplifying. */
1458 switch (code)
1460 case PLUS:
1461 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1462 when x is NaN, infinite, or finite and nonzero. They aren't
1463 when x is -0 and the rounding mode is not towards -infinity,
1464 since (-0) + 0 is then 0. */
1465 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1466 return op0;
1468 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1469 transformations are safe even for IEEE. */
1470 if (GET_CODE (op0) == NEG)
1471 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1472 else if (GET_CODE (op1) == NEG)
1473 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1475 /* (~a) + 1 -> -a */
1476 if (INTEGRAL_MODE_P (mode)
1477 && GET_CODE (op0) == NOT
1478 && trueop1 == const1_rtx)
1479 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1481 /* Handle both-operands-constant cases. We can only add
1482 CONST_INTs to constants since the sum of relocatable symbols
1483 can't be handled by most assemblers. Don't add CONST_INT
1484 to CONST_INT since overflow won't be computed properly if wider
1485 than HOST_BITS_PER_WIDE_INT. */
1487 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1488 && GET_CODE (op1) == CONST_INT)
1489 return plus_constant (op0, INTVAL (op1));
1490 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1491 && GET_CODE (op0) == CONST_INT)
1492 return plus_constant (op1, INTVAL (op0));
1494 /* See if this is something like X * C - X or vice versa or
1495 if the multiplication is written as a shift. If so, we can
1496 distribute and make a new multiply, shift, or maybe just
1497 have X (if C is 2 in the example above). But don't make
1498 real multiply if we didn't have one before. */
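/* For example, (plus (mult x (const_int 2)) x) distributes to
   (mult x (const_int 3)).  */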
1500 if (! FLOAT_MODE_P (mode))
1502 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1503 rtx lhs = op0, rhs = op1;
1504 int had_mult = 0;
1506 if (GET_CODE (lhs) == NEG)
1507 coeff0 = -1, lhs = XEXP (lhs, 0);
1508 else if (GET_CODE (lhs) == MULT
1509 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1511 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1512 had_mult = 1;
1514 else if (GET_CODE (lhs) == ASHIFT
1515 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1516 && INTVAL (XEXP (lhs, 1)) >= 0
1517 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1519 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1520 lhs = XEXP (lhs, 0);
1523 if (GET_CODE (rhs) == NEG)
1524 coeff1 = -1, rhs = XEXP (rhs, 0);
1525 else if (GET_CODE (rhs) == MULT
1526 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1528 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1529 had_mult = 1;
1531 else if (GET_CODE (rhs) == ASHIFT
1532 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1533 && INTVAL (XEXP (rhs, 1)) >= 0
1534 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1536 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1537 rhs = XEXP (rhs, 0);
1540 if (rtx_equal_p (lhs, rhs))
1542 tem = simplify_gen_binary (MULT, mode, lhs,
1543 GEN_INT (coeff0 + coeff1));
1544 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1548 /* If one of the operands is a PLUS or a MINUS, see if we can
1549 simplify this by the associative law.
1550 Don't use the associative law for floating point.
1551 The inaccuracy makes it nonassociative,
1552 and subtle programs can break if operations are associated. */
1554 if (INTEGRAL_MODE_P (mode)
1555 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1556 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1557 || (GET_CODE (op0) == CONST
1558 && GET_CODE (XEXP (op0, 0)) == PLUS)
1559 || (GET_CODE (op1) == CONST
1560 && GET_CODE (XEXP (op1, 0)) == PLUS))
1561 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1562 return tem;
1564 /* Reassociate floating point addition only when the user
1565 specifies unsafe math optimizations. */
1566 if (FLOAT_MODE_P (mode)
1567 && flag_unsafe_math_optimizations)
1569 tem = simplify_associative_operation (code, mode, op0, op1);
1570 if (tem)
1571 return tem;
1573 break;
1575 case COMPARE:
1576 #ifdef HAVE_cc0
1577 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1578 using cc0, in which case we want to leave it as a COMPARE
1579 so we can distinguish it from a register-register-copy.
1581 In IEEE floating point, x-0 is not the same as x. */
1583 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1584 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1585 && trueop1 == CONST0_RTX (mode))
1586 return op0;
1587 #endif
1589 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1590 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1591 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1592 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1594 rtx xop00 = XEXP (op0, 0);
1595 rtx xop10 = XEXP (op1, 0);
1597 #ifdef HAVE_cc0
1598 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1599 #else
1600 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1601 && GET_MODE (xop00) == GET_MODE (xop10)
1602 && REGNO (xop00) == REGNO (xop10)
1603 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1604 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1605 #endif
1606 return xop00;
1608 break;
1610 case MINUS:
1611 /* We can't assume x-x is 0 even with non-IEEE floating point,
1612 but since it is zero except in very strange circumstances, we
1613 will treat it as zero with -funsafe-math-optimizations. */
1614 if (rtx_equal_p (trueop0, trueop1)
1615 && ! side_effects_p (op0)
1616 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1617 return CONST0_RTX (mode);
1619 /* Change subtraction from zero into negation. (0 - x) is the
1620 same as -x when x is NaN, infinite, or finite and nonzero.
1621 But if the mode has signed zeros, and does not round towards
1622 -infinity, then 0 - 0 is 0, not -0. */
1623 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1624 return simplify_gen_unary (NEG, mode, op1, mode);
1626 /* (-1 - a) is ~a. */
1627 if (trueop0 == constm1_rtx)
1628 return simplify_gen_unary (NOT, mode, op1, mode);
1630 /* Subtracting 0 has no effect unless the mode has signed zeros
1631 and supports rounding towards -infinity. In such a case,
1632 0 - 0 is -0. */
1633 if (!(HONOR_SIGNED_ZEROS (mode)
1634 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1635 && trueop1 == CONST0_RTX (mode))
1636 return op0;
1638 /* See if this is something like X * C - X or vice versa or
1639 if the multiplication is written as a shift. If so, we can
1640 distribute and make a new multiply, shift, or maybe just
1641 have X (if C is 2 in the example above). But don't make
1642 real multiply if we didn't have one before. */
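/* For example, (minus (mult x (const_int 4)) x) distributes to
   (mult x (const_int 3)).  */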
1644 if (! FLOAT_MODE_P (mode))
1646 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1647 rtx lhs = op0, rhs = op1;
1648 int had_mult = 0;
1650 if (GET_CODE (lhs) == NEG)
1651 coeff0 = -1, lhs = XEXP (lhs, 0);
1652 else if (GET_CODE (lhs) == MULT
1653 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1655 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1656 had_mult = 1;
1658 else if (GET_CODE (lhs) == ASHIFT
1659 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1660 && INTVAL (XEXP (lhs, 1)) >= 0
1661 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1663 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1664 lhs = XEXP (lhs, 0);
1667 if (GET_CODE (rhs) == NEG)
1668 coeff1 = - 1, rhs = XEXP (rhs, 0);
1669 else if (GET_CODE (rhs) == MULT
1670 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1672 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1673 had_mult = 1;
1675 else if (GET_CODE (rhs) == ASHIFT
1676 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1677 && INTVAL (XEXP (rhs, 1)) >= 0
1678 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1680 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1681 rhs = XEXP (rhs, 0);
1684 if (rtx_equal_p (lhs, rhs))
1686 tem = simplify_gen_binary (MULT, mode, lhs,
1687 GEN_INT (coeff0 - coeff1));
1688 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1692 /* (a - (-b)) -> (a + b). True even for IEEE. */
1693 if (GET_CODE (op1) == NEG)
1694 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1696 /* (-x - c) may be simplified as (-c - x). */
1697 if (GET_CODE (op0) == NEG
1698 && (GET_CODE (op1) == CONST_INT
1699 || GET_CODE (op1) == CONST_DOUBLE))
1701 tem = simplify_unary_operation (NEG, mode, op1, mode);
1702 if (tem)
1703 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1706 /* If one of the operands is a PLUS or a MINUS, see if we can
1707 simplify this by the associative law.
1708 Don't use the associative law for floating point.
1709 The inaccuracy makes it nonassociative,
1710 and subtle programs can break if operations are associated. */
1712 if (INTEGRAL_MODE_P (mode)
1713 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1714 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1715 || (GET_CODE (op0) == CONST
1716 && GET_CODE (XEXP (op0, 0)) == PLUS)
1717 || (GET_CODE (op1) == CONST
1718 && GET_CODE (XEXP (op1, 0)) == PLUS))
1719 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1720 return tem;
1722 /* Don't let a relocatable value get a negative coeff. */
1723 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1724 return simplify_gen_binary (PLUS, mode,
1725 op0,
1726 neg_const_int (mode, op1));
1728 /* (x - (x & y)) -> (x & ~y) */
1729 if (GET_CODE (op1) == AND)
1731 if (rtx_equal_p (op0, XEXP (op1, 0)))
1733 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1734 GET_MODE (XEXP (op1, 1)));
1735 return simplify_gen_binary (AND, mode, op0, tem);
1737 if (rtx_equal_p (op0, XEXP (op1, 1)))
1739 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1740 GET_MODE (XEXP (op1, 0)));
1741 return simplify_gen_binary (AND, mode, op0, tem);
1744 break;
1746 case MULT:
1747 if (trueop1 == constm1_rtx)
1748 return simplify_gen_unary (NEG, mode, op0, mode);
1750 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1751 x is NaN, since x * 0 is then also NaN. Nor is it valid
1752 when the mode has signed zeros, since multiplying a negative
1753 number by 0 will give -0, not 0. */
1754 if (!HONOR_NANS (mode)
1755 && !HONOR_SIGNED_ZEROS (mode)
1756 && trueop1 == CONST0_RTX (mode)
1757 && ! side_effects_p (op0))
1758 return op1;
1760 /* In IEEE floating point, x*1 is not equivalent to x for
1761 signalling NaNs. */
1762 if (!HONOR_SNANS (mode)
1763 && trueop1 == CONST1_RTX (mode))
1764 return op0;
1766 /* Convert multiply by constant power of two into shift unless
1767 we are still generating RTL. This test is a kludge. */
1768 if (GET_CODE (trueop1) == CONST_INT
1769 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1770 /* If the mode is larger than the host word size, and the
1771 uppermost bit is set, then this isn't a power of two due
1772 to implicit sign extension. */
1773 && (width <= HOST_BITS_PER_WIDE_INT
1774 || val != HOST_BITS_PER_WIDE_INT - 1)
1775 && ! rtx_equal_function_value_matters)
1776 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1778 /* x*2 is x+x and x*(-1) is -x */
1779 if (GET_CODE (trueop1) == CONST_DOUBLE
1780 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1781 && GET_MODE (op0) == mode)
1783 REAL_VALUE_TYPE d;
1784 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1786 if (REAL_VALUES_EQUAL (d, dconst2))
1787 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1789 if (REAL_VALUES_EQUAL (d, dconstm1))
1790 return simplify_gen_unary (NEG, mode, op0, mode);
1793 /* Reassociate multiplication, but for floating point MULTs
1794 only when the user specifies unsafe math optimizations. */
1795 if (! FLOAT_MODE_P (mode)
1796 || flag_unsafe_math_optimizations)
1798 tem = simplify_associative_operation (code, mode, op0, op1);
1799 if (tem)
1800 return tem;
1802 break;
1804 case IOR:
1805 if (trueop1 == const0_rtx)
1806 return op0;
1807 if (GET_CODE (trueop1) == CONST_INT
1808 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1809 == GET_MODE_MASK (mode)))
1810 return op1;
1811 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1812 return op0;
1813 /* A | (~A) -> -1 */
1814 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1815 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1816 && ! side_effects_p (op0)
1817 && GET_MODE_CLASS (mode) != MODE_CC)
1818 return constm1_rtx;
1819 tem = simplify_associative_operation (code, mode, op0, op1);
1820 if (tem)
1821 return tem;
1822 break;
1824 case XOR:
1825 if (trueop1 == const0_rtx)
1826 return op0;
1827 if (GET_CODE (trueop1) == CONST_INT
1828 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1829 == GET_MODE_MASK (mode)))
1830 return simplify_gen_unary (NOT, mode, op0, mode);
1831 if (trueop0 == trueop1 && ! side_effects_p (op0)
1832 && GET_MODE_CLASS (mode) != MODE_CC)
1833 return const0_rtx;
1834 tem = simplify_associative_operation (code, mode, op0, op1);
1835 if (tem)
1836 return tem;
1837 break;
1839 case AND:
1840 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1841 return const0_rtx;
1842 if (GET_CODE (trueop1) == CONST_INT
1843 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1844 == GET_MODE_MASK (mode)))
1845 return op0;
1846 if (trueop0 == trueop1 && ! side_effects_p (op0)
1847 && GET_MODE_CLASS (mode) != MODE_CC)
1848 return op0;
1849 /* A & (~A) -> 0 */
1850 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1851 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1852 && ! side_effects_p (op0)
1853 && GET_MODE_CLASS (mode) != MODE_CC)
1854 return const0_rtx;
1855 tem = simplify_associative_operation (code, mode, op0, op1);
1856 if (tem)
1857 return tem;
1858 break;
1860 case UDIV:
1861 /* Convert divide by power of two into shift (divide by 1 handled
1862 below). */
1863 if (GET_CODE (trueop1) == CONST_INT
1864 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1865 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
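/* E.g., (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  */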
1867 /* Fall through.... */
1869 case DIV:
1870 if (trueop1 == CONST1_RTX (mode))
1872 /* On some platforms DIV uses narrower mode than its
1873 operands. */
1874 rtx x = gen_lowpart_common (mode, op0);
1875 if (x)
1876 return x;
1877 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1878 return gen_lowpart_SUBREG (mode, op0);
1879 else
1880 return op0;
1883 /* Maybe change 0 / x to 0. This transformation isn't safe for
1884 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1885 Nor is it safe for modes with signed zeros, since dividing
1886 0 by a negative number gives -0, not 0. */
1887 if (!HONOR_NANS (mode)
1888 && !HONOR_SIGNED_ZEROS (mode)
1889 && trueop0 == CONST0_RTX (mode)
1890 && ! side_effects_p (op1))
1891 return op0;
1893 /* Change division by a constant into multiplication. Only do
1894 this with -funsafe-math-optimizations. */
1895 else if (GET_CODE (trueop1) == CONST_DOUBLE
1896 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1897 && trueop1 != CONST0_RTX (mode)
1898 && flag_unsafe_math_optimizations)
1900 REAL_VALUE_TYPE d;
1901 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1903 if (! REAL_VALUES_EQUAL (d, dconst0))
1905 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1906 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1907 return simplify_gen_binary (MULT, mode, op0, tem);
1910 break;
1912 case UMOD:
1913 /* Handle modulus by power of two (mod with 1 handled below). */
1914 if (GET_CODE (trueop1) == CONST_INT
1915 && exact_log2 (INTVAL (trueop1)) > 0)
1916 return simplify_gen_binary (AND, mode, op0,
1917 GEN_INT (INTVAL (op1) - 1));
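/* E.g., (umod x (const_int 8)) becomes (and x (const_int 7)).  */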
1919 /* Fall through.... */
1921 case MOD:
1922 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1923 && ! side_effects_p (op0) && ! side_effects_p (op1))
1924 return const0_rtx;
1925 break;
1927 case ROTATERT:
1928 case ROTATE:
1929 case ASHIFTRT:
1930 /* Rotating ~0 always results in ~0. */
1931 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1932 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1933 && ! side_effects_p (op1))
1934 return op0;
1936 /* Fall through.... */
1938 case ASHIFT:
1939 case LSHIFTRT:
1940 if (trueop1 == const0_rtx)
1941 return op0;
1942 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1943 return op0;
1944 break;
1946 case SMIN:
1947 if (width <= HOST_BITS_PER_WIDE_INT
1948 && GET_CODE (trueop1) == CONST_INT
1949 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1950 && ! side_effects_p (op0))
1951 return op1;
1952 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1953 return op0;
1954 tem = simplify_associative_operation (code, mode, op0, op1);
1955 if (tem)
1956 return tem;
1957 break;
1959 case SMAX:
1960 if (width <= HOST_BITS_PER_WIDE_INT
1961 && GET_CODE (trueop1) == CONST_INT
1962 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1963 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1964 && ! side_effects_p (op0))
1965 return op1;
1966 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1967 return op0;
1968 tem = simplify_associative_operation (code, mode, op0, op1);
1969 if (tem)
1970 return tem;
1971 break;
1973 case UMIN:
1974 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1975 return op1;
1976 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1977 return op0;
1978 tem = simplify_associative_operation (code, mode, op0, op1);
1979 if (tem)
1980 return tem;
1981 break;
1983 case UMAX:
1984 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1985 return op1;
1986 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1987 return op0;
1988 tem = simplify_associative_operation (code, mode, op0, op1);
1989 if (tem)
1990 return tem;
1991 break;
1993 case SS_PLUS:
1994 case US_PLUS:
1995 case SS_MINUS:
1996 case US_MINUS:
1997 /* ??? There are simplifications that can be done. */
1998 return 0;
2000 case VEC_SELECT:
2001 if (!VECTOR_MODE_P (mode))
2003 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2004 || (mode
2005 != GET_MODE_INNER (GET_MODE (trueop0)))
2006 || GET_CODE (trueop1) != PARALLEL
2007 || XVECLEN (trueop1, 0) != 1
2008 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
2009 abort ();
2011 if (GET_CODE (trueop0) == CONST_VECTOR)
2012 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2014 else
2016 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2017 || (GET_MODE_INNER (mode)
2018 != GET_MODE_INNER (GET_MODE (trueop0)))
2019 || GET_CODE (trueop1) != PARALLEL)
2020 abort ();
2022 if (GET_CODE (trueop0) == CONST_VECTOR)
2024 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2025 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2026 rtvec v = rtvec_alloc (n_elts);
2027 unsigned int i;
2029 if (XVECLEN (trueop1, 0) != (int) n_elts)
2030 abort ();
2031 for (i = 0; i < n_elts; i++)
2033 rtx x = XVECEXP (trueop1, 0, i);
2035 if (GET_CODE (x) != CONST_INT)
2036 abort ();
2037 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2040 return gen_rtx_CONST_VECTOR (mode, v);
2043 return 0;
2044 case VEC_CONCAT:
2046 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2047 ? GET_MODE (trueop0)
2048 : GET_MODE_INNER (mode));
2049 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2050 ? GET_MODE (trueop1)
2051 : GET_MODE_INNER (mode));
2053 if (!VECTOR_MODE_P (mode)
2054 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2055 != GET_MODE_SIZE (mode)))
2056 abort ();
2058 if ((VECTOR_MODE_P (op0_mode)
2059 && (GET_MODE_INNER (mode)
2060 != GET_MODE_INNER (op0_mode)))
2061 || (!VECTOR_MODE_P (op0_mode)
2062 && GET_MODE_INNER (mode) != op0_mode))
2063 abort ();
2065 if ((VECTOR_MODE_P (op1_mode)
2066 && (GET_MODE_INNER (mode)
2067 != GET_MODE_INNER (op1_mode)))
2068 || (!VECTOR_MODE_P (op1_mode)
2069 && GET_MODE_INNER (mode) != op1_mode))
2070 abort ();
2072 if ((GET_CODE (trueop0) == CONST_VECTOR
2073 || GET_CODE (trueop0) == CONST_INT
2074 || GET_CODE (trueop0) == CONST_DOUBLE)
2075 && (GET_CODE (trueop1) == CONST_VECTOR
2076 || GET_CODE (trueop1) == CONST_INT
2077 || GET_CODE (trueop1) == CONST_DOUBLE))
2079 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2080 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2081 rtvec v = rtvec_alloc (n_elts);
2082 unsigned int i;
2083 unsigned in_n_elts = 1;
2085 if (VECTOR_MODE_P (op0_mode))
2086 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2087 for (i = 0; i < n_elts; i++)
2089 if (i < in_n_elts)
2091 if (!VECTOR_MODE_P (op0_mode))
2092 RTVEC_ELT (v, i) = trueop0;
2093 else
2094 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2096 else
2098 if (!VECTOR_MODE_P (op1_mode))
2099 RTVEC_ELT (v, i) = trueop1;
2100 else
2101 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2102 i - in_n_elts);
2106 return gen_rtx_CONST_VECTOR (mode, v);
2109 return 0;
2111 default:
2112 abort ();
2115 return 0;
2118 /* Get the integer argument values in two forms:
2119 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2121 arg0 = INTVAL (trueop0);
2122 arg1 = INTVAL (trueop1);
2124 if (width < HOST_BITS_PER_WIDE_INT)
2125 {
2126 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2127 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2129 arg0s = arg0;
2130 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2131 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2133 arg1s = arg1;
2134 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2135 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2136 }
2137 else
2138 {
2139 arg0s = arg0;
2140 arg1s = arg1;
2141 }
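/* For example, with width == 8 and arg0 == 0xff, the zero-extended
   form arg0 stays 0xff while the sign-extended arg0s becomes -1. */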
2143 /* Compute the value of the arithmetic. */
2145 switch (code)
2147 case PLUS:
2148 val = arg0s + arg1s;
2149 break;
2151 case MINUS:
2152 val = arg0s - arg1s;
2153 break;
2155 case MULT:
2156 val = arg0s * arg1s;
2157 break;
2159 case DIV:
2160 if (arg1s == 0
2161 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2162 && arg1s == -1))
2163 return 0;
2164 val = arg0s / arg1s;
2165 break;
2167 case MOD:
2168 if (arg1s == 0
2169 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2170 && arg1s == -1))
2171 return 0;
2172 val = arg0s % arg1s;
2173 break;
2175 case UDIV:
2176 if (arg1 == 0
2177 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2178 && arg1s == -1))
2179 return 0;
2180 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2181 break;
2183 case UMOD:
2184 if (arg1 == 0
2185 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2186 && arg1s == -1))
2187 return 0;
2188 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2189 break;
2191 case AND:
2192 val = arg0 & arg1;
2193 break;
2195 case IOR:
2196 val = arg0 | arg1;
2197 break;
2199 case XOR:
2200 val = arg0 ^ arg1;
2201 break;
2203 case LSHIFTRT:
2204 /* If shift count is undefined, don't fold it; let the machine do
2205 what it wants. But truncate it if the machine will do that. */
2206 if (arg1 < 0)
2207 return 0;
2209 #ifdef SHIFT_COUNT_TRUNCATED
2210 if (SHIFT_COUNT_TRUNCATED)
2211 arg1 %= width;
2212 #endif
2214 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2215 break;
2217 case ASHIFT:
2218 if (arg1 < 0)
2219 return 0;
2221 #ifdef SHIFT_COUNT_TRUNCATED
2222 if (SHIFT_COUNT_TRUNCATED)
2223 arg1 %= width;
2224 #endif
2226 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2227 break;
2229 case ASHIFTRT:
2230 if (arg1 < 0)
2231 return 0;
2233 #ifdef SHIFT_COUNT_TRUNCATED
2234 if (SHIFT_COUNT_TRUNCATED)
2235 arg1 %= width;
2236 #endif
2238 val = arg0s >> arg1;
2240 /* Bootstrap compiler may not have sign extended the right shift.
2241 Manually extend the sign to ensure bootstrap cc matches gcc. */
2242 if (arg0s < 0 && arg1 > 0)
2243 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
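/* For example, arg0s == -8 shifted right by 2 must yield -2 here,
   even on a host whose signed right shift is logical. */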
2245 break;
2247 case ROTATERT:
2248 if (arg1 < 0)
2249 return 0;
2251 arg1 %= width;
2252 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2253 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2254 break;
2256 case ROTATE:
2257 if (arg1 < 0)
2258 return 0;
2260 arg1 %= width;
2261 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2262 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2263 break;
2265 case COMPARE:
2266 /* Do nothing here. */
2267 return 0;
2269 case SMIN:
2270 val = arg0s <= arg1s ? arg0s : arg1s;
2271 break;
2273 case UMIN:
2274 val = ((unsigned HOST_WIDE_INT) arg0
2275 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2276 break;
2278 case SMAX:
2279 val = arg0s > arg1s ? arg0s : arg1s;
2280 break;
2282 case UMAX:
2283 val = ((unsigned HOST_WIDE_INT) arg0
2284 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2285 break;
2287 case SS_PLUS:
2288 case US_PLUS:
2289 case SS_MINUS:
2290 case US_MINUS:
2291 /* ??? There are simplifications that can be done. */
2292 return 0;
2294 default:
2295 abort ();
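/* For example, a QImode addition of 100 + 100 computes val == 200;
   trunc_int_for_mode below canonicalizes that to the sign-extended
   QImode constant -56 (0xc8). */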
2298 val = trunc_int_for_mode (val, mode);
2300 return GEN_INT (val);
2303 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2304 PLUS or MINUS.
2306 Rather than test for specific cases, we do this by a brute-force method
2307 and do all possible simplifications until no more changes occur. Then
2308 we rebuild the operation.
2310 If FORCE is true, then always generate the rtx. This is used to
2311 canonicalize stuff emitted from simplify_gen_binary. Note that this
2312 can still fail if the rtx is too complex. It won't fail just because
2313 the result is not 'simpler' than the input, however. */
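/* For example, (plus (minus A B) C) is expanded into the operand list
   { {A, +}, {B, -}, {C, +} }; each pair is then simplified against the
   others, and the survivors are rebuilt into a PLUS/MINUS chain. */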
2315 struct simplify_plus_minus_op_data
2316 {
2317 rtx op;
2318 int neg;
2319 };
2321 static int
2322 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2323 {
2324 const struct simplify_plus_minus_op_data *d1 = p1;
2325 const struct simplify_plus_minus_op_data *d2 = p2;
2327 return (commutative_operand_precedence (d2->op)
2328 - commutative_operand_precedence (d1->op));
2329 }
2331 static rtx
2332 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2333 rtx op1, int force)
2334 {
2335 struct simplify_plus_minus_op_data ops[8];
2336 rtx result, tem;
2337 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2338 int first, changed;
2339 int i, j;
2341 memset (ops, 0, sizeof ops);
2343 /* Set up the two operands and then expand them until nothing has been
2344 changed. If we run out of room in our array, give up; this should
2345 almost never happen. */
2347 ops[0].op = op0;
2348 ops[0].neg = 0;
2349 ops[1].op = op1;
2350 ops[1].neg = (code == MINUS);
2352 do
2353 {
2354 changed = 0;
2356 for (i = 0; i < n_ops; i++)
2357 {
2358 rtx this_op = ops[i].op;
2359 int this_neg = ops[i].neg;
2360 enum rtx_code this_code = GET_CODE (this_op);
2362 switch (this_code)
2363 {
2364 case PLUS:
2365 case MINUS:
2366 if (n_ops == 7)
2367 return NULL_RTX;
2369 ops[n_ops].op = XEXP (this_op, 1);
2370 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2371 n_ops++;
2373 ops[i].op = XEXP (this_op, 0);
2374 input_ops++;
2375 changed = 1;
2376 break;
2378 case NEG:
2379 ops[i].op = XEXP (this_op, 0);
2380 ops[i].neg = ! this_neg;
2381 changed = 1;
2382 break;
2384 case CONST:
2385 if (n_ops < 7
2386 && GET_CODE (XEXP (this_op, 0)) == PLUS
2387 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2388 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2389 {
2390 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2391 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2392 ops[n_ops].neg = this_neg;
2393 n_ops++;
2394 input_consts++;
2395 changed = 1;
2396 }
2397 break;
2399 case NOT:
2400 /* ~a -> (-a - 1) */
2401 if (n_ops != 7)
2402 {
2403 ops[n_ops].op = constm1_rtx;
2404 ops[n_ops++].neg = this_neg;
2405 ops[i].op = XEXP (this_op, 0);
2406 ops[i].neg = !this_neg;
2407 changed = 1;
2408 }
2409 break;
2411 case CONST_INT:
2412 if (this_neg)
2413 {
2414 ops[i].op = neg_const_int (mode, this_op);
2415 ops[i].neg = 0;
2416 changed = 1;
2417 }
2418 break;
2420 default:
2421 break;
2422 }
2423 }
2424 }
2425 while (changed);
2427 /* If we only have two operands, we can't do anything. */
2428 if (n_ops <= 2 && !force)
2429 return NULL_RTX;
2431 /* Count the number of CONSTs we didn't split above. */
2432 for (i = 0; i < n_ops; i++)
2433 if (GET_CODE (ops[i].op) == CONST)
2434 input_consts++;
2436 /* Now simplify each pair of operands until nothing changes. The first
2437 time through just simplify constants against each other. */
2439 first = 1;
2440 do
2441 {
2442 changed = first;
2444 for (i = 0; i < n_ops - 1; i++)
2445 for (j = i + 1; j < n_ops; j++)
2446 {
2447 rtx lhs = ops[i].op, rhs = ops[j].op;
2448 int lneg = ops[i].neg, rneg = ops[j].neg;
2450 if (lhs != 0 && rhs != 0
2451 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2452 {
2453 enum rtx_code ncode = PLUS;
2455 if (lneg != rneg)
2456 {
2457 ncode = MINUS;
2458 if (lneg)
2459 tem = lhs, lhs = rhs, rhs = tem;
2460 }
2461 else if (swap_commutative_operands_p (lhs, rhs))
2462 tem = lhs, lhs = rhs, rhs = tem;
2464 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2466 /* Reject "simplifications" that just wrap the two
2467 arguments in a CONST. Failure to do so can result
2468 in infinite recursion with simplify_binary_operation
2469 when it calls us to simplify CONST operations. */
2470 if (tem
2471 && ! (GET_CODE (tem) == CONST
2472 && GET_CODE (XEXP (tem, 0)) == ncode
2473 && XEXP (XEXP (tem, 0), 0) == lhs
2474 && XEXP (XEXP (tem, 0), 1) == rhs)
2475 /* Don't allow -x + -1 -> ~x simplifications in the
2476 first pass. This allows us the chance to combine
2477 the -1 with other constants. */
2478 && ! (first
2479 && GET_CODE (tem) == NOT
2480 && XEXP (tem, 0) == rhs))
2481 {
2482 lneg &= rneg;
2483 if (GET_CODE (tem) == NEG)
2484 tem = XEXP (tem, 0), lneg = !lneg;
2485 if (GET_CODE (tem) == CONST_INT && lneg)
2486 tem = neg_const_int (mode, tem), lneg = 0;
2488 ops[i].op = tem;
2489 ops[i].neg = lneg;
2490 ops[j].op = NULL_RTX;
2491 changed = 1;
2492 }
2493 }
2494 }
2496 first = 0;
2497 }
2498 while (changed);
2500 /* Pack all the operands to the lower-numbered entries. */
2501 for (i = 0, j = 0; j < n_ops; j++)
2502 if (ops[j].op)
2503 ops[i++] = ops[j];
2504 n_ops = i;
2506 /* Sort the operations based on swap_commutative_operands_p. */
2507 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2509 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2510 if (n_ops == 2
2511 && GET_CODE (ops[1].op) == CONST_INT
2512 && CONSTANT_P (ops[0].op)
2513 && ops[0].neg)
2514 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2516 /* We suppressed creation of trivial CONST expressions in the
2517 combination loop to avoid recursion. Create one manually now.
2518 The combination loop should have ensured that there is exactly
2519 one CONST_INT, and the sort will have ensured that it is last
2520 in the array and that any other constant will be next-to-last. */
2522 if (n_ops > 1
2523 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2524 && CONSTANT_P (ops[n_ops - 2].op))
2526 rtx value = ops[n_ops - 1].op;
2527 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2528 value = neg_const_int (mode, value);
2529 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2530 n_ops--;
2533 /* Count the number of CONSTs that we generated. */
2534 n_consts = 0;
2535 for (i = 0; i < n_ops; i++)
2536 if (GET_CODE (ops[i].op) == CONST)
2537 n_consts++;
2539 /* Give up if we didn't reduce the number of operands we had. Make
2540 sure we count a CONST as two operands. If we have the same
2541 number of operands, but have made more CONSTs than before, this
2542 is also an improvement, so accept it. */
2543 if (!force
2544 && (n_ops + n_consts > input_ops
2545 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2546 return NULL_RTX;
2548 /* Put a non-negated operand first, if possible. */
2550 for (i = 0; i < n_ops && ops[i].neg; i++)
2551 continue;
2552 if (i == n_ops)
2553 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2554 else if (i != 0)
2556 tem = ops[0].op;
2557 ops[0] = ops[i];
2558 ops[i].op = tem;
2559 ops[i].neg = 1;
2562 /* Now make the result by performing the requested operations. */
2563 result = ops[0].op;
2564 for (i = 1; i < n_ops; i++)
2565 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2566 mode, result, ops[i].op);
2568 return result;
2571 /* Like simplify_binary_operation except used for relational operators.
2572 MODE is the mode of the operands, not that of the result. If MODE
2573 is VOIDmode, both operands must also be VOIDmode and we compare the
2574 operands in "infinite precision".
2576 If no simplification is possible, this function returns zero. Otherwise,
2577 it returns either const_true_rtx or const0_rtx. */
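/* For example, comparing (const_int 7) against (const_int 5) with code
   GT and MODE == VOIDmode compares the two constants in infinite
   precision and folds to const_true_rtx. */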
2579 rtx
2580 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2581 rtx op0, rtx op1)
2582 {
2583 int equal, op0lt, op0ltu, op1lt, op1ltu;
2584 rtx tem;
2585 rtx trueop0;
2586 rtx trueop1;
2588 if (mode == VOIDmode
2589 && (GET_MODE (op0) != VOIDmode
2590 || GET_MODE (op1) != VOIDmode))
2591 abort ();
2593 /* If op0 is a compare, extract the comparison arguments from it. */
2594 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2595 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2597 trueop0 = avoid_constant_pool_reference (op0);
2598 trueop1 = avoid_constant_pool_reference (op1);
2600 /* We can't simplify MODE_CC values since we don't know what the
2601 actual comparison is. */
2602 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2603 return 0;
2605 /* Make sure the constant is second. */
2606 if (swap_commutative_operands_p (trueop0, trueop1))
2608 tem = op0, op0 = op1, op1 = tem;
2609 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2610 code = swap_condition (code);
2613 /* For integer comparisons of A and B maybe we can simplify A - B and can
2614 then simplify a comparison of that with zero. If A and B are both either
2615 a register or a CONST_INT, this can't help; testing for these cases will
2616 prevent infinite recursion here and speed things up.
2618 If CODE is an unsigned comparison, then we can never do this optimization,
2619 because it gives an incorrect result if the subtraction wraps around zero.
2620 ANSI C defines unsigned operations such that they never overflow, and
2621 thus such cases can not be ignored. */
2623 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2624 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2625 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2626 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2627 /* We cannot do this for == or != if tem is a nonzero address. */
2628 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2629 && code != GTU && code != GEU && code != LTU && code != LEU)
2630 return simplify_relational_operation (signed_condition (code),
2631 mode, tem, const0_rtx);
2633 if (flag_unsafe_math_optimizations && code == ORDERED)
2634 return const_true_rtx;
2636 if (flag_unsafe_math_optimizations && code == UNORDERED)
2637 return const0_rtx;
2639 /* For modes without NaNs, if the two operands are equal, we know the
2640 result except if they have side-effects. */
2641 if (! HONOR_NANS (GET_MODE (trueop0))
2642 && rtx_equal_p (trueop0, trueop1)
2643 && ! side_effects_p (trueop0))
2644 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2646 /* If the operands are floating-point constants, see if we can fold
2647 the result. */
2648 else if (GET_CODE (trueop0) == CONST_DOUBLE
2649 && GET_CODE (trueop1) == CONST_DOUBLE
2650 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2652 REAL_VALUE_TYPE d0, d1;
2654 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2655 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2657 /* Comparisons are unordered iff at least one of the values is NaN. */
2658 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2659 switch (code)
2661 case UNEQ:
2662 case UNLT:
2663 case UNGT:
2664 case UNLE:
2665 case UNGE:
2666 case NE:
2667 case UNORDERED:
2668 return const_true_rtx;
2669 case EQ:
2670 case LT:
2671 case GT:
2672 case LE:
2673 case GE:
2674 case LTGT:
2675 case ORDERED:
2676 return const0_rtx;
2677 default:
2678 return 0;
2681 equal = REAL_VALUES_EQUAL (d0, d1);
2682 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2683 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2686 /* Otherwise, see if the operands are both integers. */
2687 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2688 && (GET_CODE (trueop0) == CONST_DOUBLE
2689 || GET_CODE (trueop0) == CONST_INT)
2690 && (GET_CODE (trueop1) == CONST_DOUBLE
2691 || GET_CODE (trueop1) == CONST_INT))
2693 int width = GET_MODE_BITSIZE (mode);
2694 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2695 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2697 /* Get the two words comprising each integer constant. */
2698 if (GET_CODE (trueop0) == CONST_DOUBLE)
2700 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2701 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2703 else
2705 l0u = l0s = INTVAL (trueop0);
2706 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2709 if (GET_CODE (trueop1) == CONST_DOUBLE)
2711 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2712 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2714 else
2716 l1u = l1s = INTVAL (trueop1);
2717 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2720 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2721 we have to sign or zero-extend the values. */
2722 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2724 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2725 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2727 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2728 l0s |= ((HOST_WIDE_INT) (-1) << width);
2730 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2731 l1s |= ((HOST_WIDE_INT) (-1) << width);
2733 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2734 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2736 equal = (h0u == h1u && l0u == l1u);
2737 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2738 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2739 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2740 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2743 /* Otherwise, there are some code-specific tests we can make. */
2744 else
2746 switch (code)
2748 case EQ:
2749 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2750 return const0_rtx;
2751 break;
2753 case NE:
2754 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2755 return const_true_rtx;
2756 break;
2758 case GEU:
2759 /* Unsigned values are never negative. */
2760 if (trueop1 == const0_rtx)
2761 return const_true_rtx;
2762 break;
2764 case LTU:
2765 if (trueop1 == const0_rtx)
2766 return const0_rtx;
2767 break;
2769 case LEU:
2770 /* Unsigned values are never greater than the largest
2771 unsigned value. */
2772 if (GET_CODE (trueop1) == CONST_INT
2773 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2774 && INTEGRAL_MODE_P (mode))
2775 return const_true_rtx;
2776 break;
2778 case GTU:
2779 if (GET_CODE (trueop1) == CONST_INT
2780 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2781 && INTEGRAL_MODE_P (mode))
2782 return const0_rtx;
2783 break;
2785 case LT:
2786 /* Optimize abs(x) < 0.0. */
2787 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2789 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2790 : trueop0;
2791 if (GET_CODE (tem) == ABS)
2792 return const0_rtx;
2794 break;
2796 case GE:
2797 /* Optimize abs(x) >= 0.0. */
2798 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2800 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2801 : trueop0;
2802 if (GET_CODE (tem) == ABS)
2803 return const_true_rtx;
2805 break;
2807 case UNGE:
2808 /* Optimize ! (abs(x) < 0.0). */
2809 if (trueop1 == CONST0_RTX (mode))
2811 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2812 : trueop0;
2813 if (GET_CODE (tem) == ABS)
2814 return const_true_rtx;
2816 break;
2818 default:
2819 break;
2822 return 0;
2825 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2826 as appropriate. */
2827 switch (code)
2829 case EQ:
2830 case UNEQ:
2831 return equal ? const_true_rtx : const0_rtx;
2832 case NE:
2833 case LTGT:
2834 return ! equal ? const_true_rtx : const0_rtx;
2835 case LT:
2836 case UNLT:
2837 return op0lt ? const_true_rtx : const0_rtx;
2838 case GT:
2839 case UNGT:
2840 return op1lt ? const_true_rtx : const0_rtx;
2841 case LTU:
2842 return op0ltu ? const_true_rtx : const0_rtx;
2843 case GTU:
2844 return op1ltu ? const_true_rtx : const0_rtx;
2845 case LE:
2846 case UNLE:
2847 return equal || op0lt ? const_true_rtx : const0_rtx;
2848 case GE:
2849 case UNGE:
2850 return equal || op1lt ? const_true_rtx : const0_rtx;
2851 case LEU:
2852 return equal || op0ltu ? const_true_rtx : const0_rtx;
2853 case GEU:
2854 return equal || op1ltu ? const_true_rtx : const0_rtx;
2855 case ORDERED:
2856 return const_true_rtx;
2857 case UNORDERED:
2858 return const0_rtx;
2859 default:
2860 abort ();
2864 /* Simplify CODE, an operation with result mode MODE and three operands,
2865 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2866 a constant. Return 0 if no simplification is possible. */
2868 rtx
2869 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2870 enum machine_mode op0_mode, rtx op0, rtx op1,
2871 rtx op2)
2872 {
2873 unsigned int width = GET_MODE_BITSIZE (mode);
2875 /* VOIDmode means "infinite" precision. */
2876 if (width == 0)
2877 width = HOST_BITS_PER_WIDE_INT;
2879 switch (code)
2881 case SIGN_EXTRACT:
2882 case ZERO_EXTRACT:
2883 if (GET_CODE (op0) == CONST_INT
2884 && GET_CODE (op1) == CONST_INT
2885 && GET_CODE (op2) == CONST_INT
2886 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2887 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2889 /* Extracting a bit-field from a constant */
2890 HOST_WIDE_INT val = INTVAL (op0);
2892 if (BITS_BIG_ENDIAN)
2893 val >>= (GET_MODE_BITSIZE (op0_mode)
2894 - INTVAL (op2) - INTVAL (op1));
2895 else
2896 val >>= INTVAL (op2);
2898 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2900 /* First zero-extend. */
2901 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2902 /* If desired, propagate sign bit. */
2903 if (code == SIGN_EXTRACT
2904 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2905 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2908 /* Clear the bits that don't belong in our mode,
2909 unless they and our sign bit are all one.
2910 So we get either a reasonable negative value or a reasonable
2911 unsigned value for this mode. */
2912 if (width < HOST_BITS_PER_WIDE_INT
2913 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2914 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2915 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2917 return GEN_INT (val);
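/* For example, with BITS_BIG_ENDIAN == 0,
   (zero_extract:SI (const_int 90) (const_int 4) (const_int 1))
   shifts 0x5a right by one and masks to four bits, giving
   (const_int 13). */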
2919 break;
2921 case IF_THEN_ELSE:
2922 if (GET_CODE (op0) == CONST_INT)
2923 return op0 != const0_rtx ? op1 : op2;
2925 /* Convert c ? a : a into "a". */
2926 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2927 return op1;
2929 /* Convert a != b ? a : b into "a". */
2930 if (GET_CODE (op0) == NE
2931 && ! side_effects_p (op0)
2932 && ! HONOR_NANS (mode)
2933 && ! HONOR_SIGNED_ZEROS (mode)
2934 && ((rtx_equal_p (XEXP (op0, 0), op1)
2935 && rtx_equal_p (XEXP (op0, 1), op2))
2936 || (rtx_equal_p (XEXP (op0, 0), op2)
2937 && rtx_equal_p (XEXP (op0, 1), op1))))
2938 return op1;
2940 /* Convert a == b ? a : b into "b". */
2941 if (GET_CODE (op0) == EQ
2942 && ! side_effects_p (op0)
2943 && ! HONOR_NANS (mode)
2944 && ! HONOR_SIGNED_ZEROS (mode)
2945 && ((rtx_equal_p (XEXP (op0, 0), op1)
2946 && rtx_equal_p (XEXP (op0, 1), op2))
2947 || (rtx_equal_p (XEXP (op0, 0), op2)
2948 && rtx_equal_p (XEXP (op0, 1), op1))))
2949 return op2;
2951 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2953 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2954 ? GET_MODE (XEXP (op0, 1))
2955 : GET_MODE (XEXP (op0, 0)));
2956 rtx temp;
2957 if (cmp_mode == VOIDmode)
2958 cmp_mode = op0_mode;
2959 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2960 XEXP (op0, 0), XEXP (op0, 1));
2962 /* See if any simplifications were possible. */
2963 if (temp == const0_rtx)
2964 return op2;
2965 else if (temp == const_true_rtx)
2966 return op1;
2967 else if (temp)
2968 abort ();
2970 /* Look for happy constants in op1 and op2. */
2971 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2973 HOST_WIDE_INT t = INTVAL (op1);
2974 HOST_WIDE_INT f = INTVAL (op2);
2976 if (t == STORE_FLAG_VALUE && f == 0)
2977 code = GET_CODE (op0);
2978 else if (t == 0 && f == STORE_FLAG_VALUE)
2980 enum rtx_code tmp;
2981 tmp = reversed_comparison_code (op0, NULL_RTX);
2982 if (tmp == UNKNOWN)
2983 break;
2984 code = tmp;
2986 else
2987 break;
2989 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2992 break;
2994 case VEC_MERGE:
2995 if (GET_MODE (op0) != mode
2996 || GET_MODE (op1) != mode
2997 || !VECTOR_MODE_P (mode))
2998 abort ();
2999 op2 = avoid_constant_pool_reference (op2);
3000 if (GET_CODE (op2) == CONST_INT)
3002 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3003 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3004 int mask = (1 << n_elts) - 1;
3006 if (!(INTVAL (op2) & mask))
3007 return op1;
3008 if ((INTVAL (op2) & mask) == mask)
3009 return op0;
3011 op0 = avoid_constant_pool_reference (op0);
3012 op1 = avoid_constant_pool_reference (op1);
3013 if (GET_CODE (op0) == CONST_VECTOR
3014 && GET_CODE (op1) == CONST_VECTOR)
3016 rtvec v = rtvec_alloc (n_elts);
3017 unsigned int i;
3019 for (i = 0; i < n_elts; i++)
3020 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3021 ? CONST_VECTOR_ELT (op0, i)
3022 : CONST_VECTOR_ELT (op1, i));
3023 return gen_rtx_CONST_VECTOR (mode, v);
3026 break;
3028 default:
3029 abort ();
3032 return 0;
3035 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3036 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3038 Works by unpacking OP into a collection of 8-bit values
3039 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3040 and then repacking them again for OUTERMODE. */
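/* As an illustration, assuming a little-endian target: an HImode
   (const_int 0x1234) unpacks into the byte array { 0x34, 0x12 }, and a
   QImode subreg at byte 0 repacks as (const_int 0x34). */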
3042 static rtx
3043 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3044 enum machine_mode innermode, unsigned int byte)
3045 {
3046 /* We support up to 512-bit values (for V8DFmode). */
3047 enum {
3048 max_bitsize = 512,
3049 value_bit = 8,
3050 value_mask = (1 << value_bit) - 1
3051 };
3052 unsigned char value[max_bitsize / value_bit];
3053 int value_start;
3054 int i;
3055 int elem;
3057 int num_elem;
3058 rtx * elems;
3059 int elem_bitsize;
3060 rtx result_s;
3061 rtvec result_v = NULL;
3062 enum mode_class outer_class;
3063 enum machine_mode outer_submode;
3065 /* Some ports misuse CCmode. */
3066 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3067 return op;
3069 /* Unpack the value. */
3071 if (GET_CODE (op) == CONST_VECTOR)
3073 num_elem = CONST_VECTOR_NUNITS (op);
3074 elems = &CONST_VECTOR_ELT (op, 0);
3075 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3077 else
3079 num_elem = 1;
3080 elems = &op;
3081 elem_bitsize = max_bitsize;
3084 if (BITS_PER_UNIT % value_bit != 0)
3085 abort (); /* Too complicated; reducing value_bit may help. */
3086 if (elem_bitsize % BITS_PER_UNIT != 0)
3087 abort (); /* I don't know how to handle endianness of sub-units. */
3089 for (elem = 0; elem < num_elem; elem++)
3091 unsigned char * vp;
3092 rtx el = elems[elem];
3094 /* Vectors are kept in target memory order. (This is probably
3095 a mistake.) */
3097 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3098 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3099 / BITS_PER_UNIT);
3100 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3101 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3102 unsigned bytele = (subword_byte % UNITS_PER_WORD
3103 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3104 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3107 switch (GET_CODE (el))
3109 case CONST_INT:
3110 for (i = 0;
3111 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3112 i += value_bit)
3113 *vp++ = INTVAL (el) >> i;
3114 /* CONST_INTs are always logically sign-extended. */
3115 for (; i < elem_bitsize; i += value_bit)
3116 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3117 break;
3119 case CONST_DOUBLE:
3120 if (GET_MODE (el) == VOIDmode)
3122 /* If this triggers, someone should have generated a
3123 CONST_INT instead. */
3124 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3125 abort ();
3127 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3128 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3129 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3131 *vp++
3132 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3133 i += value_bit;
3135 /* It shouldn't matter what's done here, so fill it with
3136 zero. */
3137 for (; i < elem_bitsize; i += value_bit)
3138 *vp++ = 0;
3140 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3142 long tmp[max_bitsize / 32];
3143 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3145 if (bitsize > elem_bitsize)
3146 abort ();
3147 if (bitsize % value_bit != 0)
3148 abort ();
3150 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3151 GET_MODE (el));
3153 /* real_to_target produces its result in words affected by
3154 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3155 and use WORDS_BIG_ENDIAN instead; see the documentation
3156 of SUBREG in rtl.texi. */
3157 for (i = 0; i < bitsize; i += value_bit)
3159 int ibase;
3160 if (WORDS_BIG_ENDIAN)
3161 ibase = bitsize - 1 - i;
3162 else
3163 ibase = i;
3164 *vp++ = tmp[ibase / 32] >> i % 32;
3167 /* It shouldn't matter what's done here, so fill it with
3168 zero. */
3169 for (; i < elem_bitsize; i += value_bit)
3170 *vp++ = 0;
3172 else
3173 abort ();
3174 break;
3176 default:
3177 abort ();
3181 /* Now, pick the right byte to start with. */
3182 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3183 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3184 will already have offset 0. */
3185 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3187 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3188 - byte);
3189 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3190 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3191 byte = (subword_byte % UNITS_PER_WORD
3192 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3195 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3196 so if it's become negative it will instead be very large.) */
3197 if (byte >= GET_MODE_SIZE (innermode))
3198 abort ();
3200 /* Convert from bytes to chunks of size value_bit. */
3201 value_start = byte * (BITS_PER_UNIT / value_bit);
3203 /* Re-pack the value. */
3205 if (VECTOR_MODE_P (outermode))
3207 num_elem = GET_MODE_NUNITS (outermode);
3208 result_v = rtvec_alloc (num_elem);
3209 elems = &RTVEC_ELT (result_v, 0);
3210 outer_submode = GET_MODE_INNER (outermode);
3212 else
3214 num_elem = 1;
3215 elems = &result_s;
3216 outer_submode = outermode;
3219 outer_class = GET_MODE_CLASS (outer_submode);
3220 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3222 if (elem_bitsize % value_bit != 0)
3223 abort ();
3224 if (elem_bitsize + value_start * value_bit > max_bitsize)
3225 abort ();
3227 for (elem = 0; elem < num_elem; elem++)
3229 unsigned char *vp;
3231 /* Vectors are stored in target memory order. (This is probably
3232 a mistake.) */
3234 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3235 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3236 / BITS_PER_UNIT);
3237 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3238 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3239 unsigned bytele = (subword_byte % UNITS_PER_WORD
3240 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3241 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3244 switch (outer_class)
3246 case MODE_INT:
3247 case MODE_PARTIAL_INT:
3249 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3251 for (i = 0;
3252 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3253 i += value_bit)
3254 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3255 for (; i < elem_bitsize; i += value_bit)
3256 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3257 << (i - HOST_BITS_PER_WIDE_INT));
3259 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3260 know why. */
3261 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3262 elems[elem] = gen_int_mode (lo, outer_submode);
3263 else
3264 elems[elem] = immed_double_const (lo, hi, outer_submode);
3266 break;
3268 case MODE_FLOAT:
3270 REAL_VALUE_TYPE r;
3271 long tmp[max_bitsize / 32];
3273 /* real_from_target wants its input in words affected by
3274 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3275 and use WORDS_BIG_ENDIAN instead; see the documentation
3276 of SUBREG in rtl.texi. */
3277 for (i = 0; i < max_bitsize / 32; i++)
3278 tmp[i] = 0;
3279 for (i = 0; i < elem_bitsize; i += value_bit)
3281 int ibase;
3282 if (WORDS_BIG_ENDIAN)
3283 ibase = elem_bitsize - 1 - i;
3284 else
3285 ibase = i;
3286 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3289 real_from_target (&r, tmp, outer_submode);
3290 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3292 break;
3294 default:
3295 abort ();
3298 if (VECTOR_MODE_P (outermode))
3299 return gen_rtx_CONST_VECTOR (outermode, result_v);
3300 else
3301 return result_s;
3304 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3305 Return 0 if no simplifications are possible. */
3306 rtx
3307 simplify_subreg (enum machine_mode outermode, rtx op,
3308 enum machine_mode innermode, unsigned int byte)
3309 {
3310 /* Little bit of sanity checking. */
3311 if (innermode == VOIDmode || outermode == VOIDmode
3312 || innermode == BLKmode || outermode == BLKmode)
3313 abort ();
3315 if (GET_MODE (op) != innermode
3316 && GET_MODE (op) != VOIDmode)
3317 abort ();
3319 if (byte % GET_MODE_SIZE (outermode)
3320 || byte >= GET_MODE_SIZE (innermode))
3321 abort ();
3323 if (outermode == innermode && !byte)
3324 return op;
3326 if (GET_CODE (op) == CONST_INT
3327 || GET_CODE (op) == CONST_DOUBLE
3328 || GET_CODE (op) == CONST_VECTOR)
3329 return simplify_immed_subreg (outermode, op, innermode, byte);
3331 /* Changing mode twice with SUBREG => just change it once,
3332 or not at all if changing back to op's starting mode. */
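/* For example, assuming a little-endian target,
   (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses to
   (subreg:QI (reg:SI R) 0). */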
3333 if (GET_CODE (op) == SUBREG)
3335 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3336 int final_offset = byte + SUBREG_BYTE (op);
3337 rtx new;
3339 if (outermode == innermostmode
3340 && byte == 0 && SUBREG_BYTE (op) == 0)
3341 return SUBREG_REG (op);
3343 /* The SUBREG_BYTE represents the offset, as if the value were stored
3344 in memory. An irritating exception is the paradoxical subreg, where
3345 we define SUBREG_BYTE to be 0. On big endian machines, this
3346 value would otherwise be negative. For a moment, undo this exception. */
3347 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3349 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3350 if (WORDS_BIG_ENDIAN)
3351 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3352 if (BYTES_BIG_ENDIAN)
3353 final_offset += difference % UNITS_PER_WORD;
3355 if (SUBREG_BYTE (op) == 0
3356 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3358 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3359 if (WORDS_BIG_ENDIAN)
3360 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3361 if (BYTES_BIG_ENDIAN)
3362 final_offset += difference % UNITS_PER_WORD;
3365 /* See whether resulting subreg will be paradoxical. */
3366 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3368 /* In nonparadoxical subregs we can't handle negative offsets. */
3369 if (final_offset < 0)
3370 return NULL_RTX;
3371 /* Bail out in case resulting subreg would be incorrect. */
3372 if (final_offset % GET_MODE_SIZE (outermode)
3373 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3374 return NULL_RTX;
3376 else
3378 int offset = 0;
3379 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3381 /* In a paradoxical subreg, see if we are still looking at the lower part.
3382 If so, our SUBREG_BYTE will be 0. */
3383 if (WORDS_BIG_ENDIAN)
3384 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3385 if (BYTES_BIG_ENDIAN)
3386 offset += difference % UNITS_PER_WORD;
3387 if (offset == final_offset)
3388 final_offset = 0;
3389 else
3390 return NULL_RTX;
3393 /* Recurse for further possible simplifications. */
3394 new = simplify_subreg (outermode, SUBREG_REG (op),
3395 GET_MODE (SUBREG_REG (op)),
3396 final_offset);
3397 if (new)
3398 return new;
3399 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3402 /* SUBREG of a hard register => just change the register number
3403 and/or mode. If the hard register is not valid in that mode,
3404 suppress this simplification. If the hard register is the stack,
3405 frame, or argument pointer, leave this as a SUBREG. */
3407 if (REG_P (op)
3408 && (! REG_FUNCTION_VALUE_P (op)
3409 || ! rtx_equal_function_value_matters)
3410 && REGNO (op) < FIRST_PSEUDO_REGISTER
3411 #ifdef CANNOT_CHANGE_MODE_CLASS
3412 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3413 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3414 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3415 #endif
3416 && ((reload_completed && !frame_pointer_needed)
3417 || (REGNO (op) != FRAME_POINTER_REGNUM
3418 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3419 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3420 #endif
3422 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3423 && REGNO (op) != ARG_POINTER_REGNUM
3424 #endif
3425 && REGNO (op) != STACK_POINTER_REGNUM
3426 && subreg_offset_representable_p (REGNO (op), innermode,
3427 byte, outermode))
3429 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3430 int final_regno = subreg_hard_regno (tem, 0);
3432 /* ??? We do allow it if the current REG is not valid for
3433 its mode. This is a kludge to work around how float/complex
3434 arguments are passed on 32-bit SPARC and should be fixed. */
3435 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3436 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3438 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3440 /* Propagate original regno. We don't have any way to specify
3441 the offset inside original regno, so do so only for lowpart.
3442 The information is used only by alias analysis, which cannot
3443 grok partial registers anyway. */
3445 if (subreg_lowpart_offset (outermode, innermode) == byte)
3446 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3447 return x;
3451 /* If we have a SUBREG of a register that we are replacing and we are
3452 replacing it with a MEM, make a new MEM and try replacing the
3453 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3454 or if we would be widening it. */
3456 if (GET_CODE (op) == MEM
3457 && ! mode_dependent_address_p (XEXP (op, 0))
3458 /* Allow splitting of volatile memory references in case we don't
3459 have instruction to move the whole thing. */
3460 && (! MEM_VOLATILE_P (op)
3461 || ! have_insn_for (SET, innermode))
3462 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3463 return adjust_address_nv (op, outermode, byte);
3465 /* Handle complex values represented as CONCAT
3466 of real and imaginary part. */
3467 if (GET_CODE (op) == CONCAT)
3469 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3470 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3471 unsigned int final_offset;
3472 rtx res;
3474 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3475 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3476 if (res)
3477 return res;
3478 /* We can at least simplify it by referring directly to the relevant part. */
3479 return gen_rtx_SUBREG (outermode, part, final_offset);
3482 return NULL_RTX;
3485 /* Make a SUBREG operation or equivalent if it folds. */
3487 rtx
3488 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3489 enum machine_mode innermode, unsigned int byte)
3490 {
3491 rtx new;
3492 /* Little bit of sanity checking. */
3493 if (innermode == VOIDmode || outermode == VOIDmode
3494 || innermode == BLKmode || outermode == BLKmode)
3495 abort ();
3497 if (GET_MODE (op) != innermode
3498 && GET_MODE (op) != VOIDmode)
3499 abort ();
3501 if (byte % GET_MODE_SIZE (outermode)
3502 || byte >= GET_MODE_SIZE (innermode))
3503 abort ();
3505 if (GET_CODE (op) == QUEUED)
3506 return NULL_RTX;
3508 new = simplify_subreg (outermode, op, innermode, byte);
3509 if (new)
3510 return new;
3512 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3513 return NULL_RTX;
3515 return gen_rtx_SUBREG (outermode, op, byte);
3517 /* Simplify X, an rtx expression.
3519 Return the simplified expression or NULL if no simplifications
3520 were possible.
3522 This is the preferred entry point into the simplification routines;
3523 however, we still allow passes to call the more specific routines.
3525 Right now GCC has three (yes, three) major bodies of RTL simplification
3526 code that need to be unified.
3528 1. fold_rtx in cse.c. This code uses various CSE specific
3529 information to aid in RTL simplification.
3531 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3532 it uses combine specific information to aid in RTL
3533 simplification.
3535 3. The routines in this file.
3538 Long term we want to only have one body of simplification code; to
3539 get to that state I recommend the following steps:
3541 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3542 which are not pass dependent state into these routines.
3544 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3545 use this routine whenever possible.
3547 3. Allow for pass dependent state to be provided to these
3548 routines and add simplifications based on the pass dependent
3549 state. Remove code from cse.c & combine.c that becomes
3550 redundant/dead.
3552 It will take time, but ultimately the compiler will be easier to
3553 maintain and improve. It's totally silly that when we add a
3554 simplification it needs to be added to 4 places (3 for RTL
3555 simplification and 1 for tree simplification). */
3557 rtx
3558 simplify_rtx (rtx x)
3559 {
3560 enum rtx_code code = GET_CODE (x);
3561 enum machine_mode mode = GET_MODE (x);
3562 rtx temp;
3564 switch (GET_RTX_CLASS (code))
3566 case '1':
3567 return simplify_unary_operation (code, mode,
3568 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3569 case 'c':
3570 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3571 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3573 /* Fall through.... */
3575 case '2':
3576 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3578 case '3':
3579 case 'b':
3580 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3581 XEXP (x, 0), XEXP (x, 1),
3582 XEXP (x, 2));
3584 case '<':
3585 if (VECTOR_MODE_P (mode))
3586 return NULL_RTX;
3587 temp = simplify_relational_operation (code,
3588 ((GET_MODE (XEXP (x, 0))
3589 != VOIDmode)
3590 ? GET_MODE (XEXP (x, 0))
3591 : GET_MODE (XEXP (x, 1))),
3592 XEXP (x, 0), XEXP (x, 1));
3593 #ifdef FLOAT_STORE_FLAG_VALUE
3594 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3596 if (temp == const0_rtx)
3597 temp = CONST0_RTX (mode);
3598 else
3599 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3600 mode);
3602 #endif
3603 return temp;
3605 case 'x':
3606 if (code == SUBREG)
3607 return simplify_gen_subreg (mode, SUBREG_REG (x),
3608 GET_MODE (SUBREG_REG (x)),
3609 SUBREG_BYTE (x));
3610 if (code == CONSTANT_P_RTX)
3612 if (CONSTANT_P (XEXP (x, 0)))
3613 return const1_rtx;
3615 break;
3617 case 'o':
3618 if (code == LO_SUM)
3620 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3621 if (GET_CODE (XEXP (x, 0)) == HIGH
3622 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3623 return XEXP (x, 1);
3625 break;
3627 default:
3628 break;
3630 return NULL;