/* gcc/simplify-rtx.c */
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
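
/* Illustration (added, not in the original source): on a host where
   HOST_WIDE_INT is 64 bits, a 128-bit constant is carried as such a
   pair, so the value 5 is (low = 5, high = 0) while -2 is
   (low = -2, high = -1).  HWI_SIGN_EXTEND (5) yields 0 and
   HWI_SIGN_EXTEND (-2) yields -1, supplying the missing high part.  */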
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
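
/* Hedged usage sketch (added): callers prefer this over the raw
   gen_rtx_fmt_ee so that foldable operands fold eagerly.  For example,
   simplify_gen_binary (PLUS, SImode, x, const0_rtx) hands back X
   itself, and simplify_gen_binary (PLUS, SImode, const1_rtx,
   GEN_INT (2)) folds to (const_int 3) instead of building a PLUS.  */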

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
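
/* Illustration (added): given X = (mem (symbol_ref LC0)), where LC0 is
   a hypothetical pool label whose entry holds the DFmode constant 3.14,
   this returns that CONST_DOUBLE directly; if the pool entry's mode
   differs from the MEM's mode, the subreg fix-up above reinterprets
   the stored bits in the MEM's mode.  */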

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
	{
#ifdef FLOAT_STORE_FLAG_VALUE
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE val;
	      if (tem == const0_rtx)
		return CONST0_RTX (mode);
	      if (tem != const_true_rtx)
		abort ();
	      val = FLOAT_STORE_FLAG_VALUE (mode);
	      return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	    }
#endif
	  return tem;
	}
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return op0;
	  return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
	  if (new != UNKNOWN)
	    return simplify_gen_relational (new, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
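
/* Illustration (added): comparing a comparison against zero reuses the
   inner operands, e.g. (ne (eq x y) (const_int 0)) in the same mode is
   just (eq x y), while (eq (eq x y) (const_int 0)) becomes (ne x y)
   whenever reversing the comparison is valid for the operands' mode.  */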

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case 'o':
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}
      break;

    default:
      break;
    }
  return x;
}
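
/* Hedged usage sketch (added): substitution folds as it rebuilds, so
   simplify_replace_rtx applied to (plus:SI (reg:SI 100) (const_int 1))
   with OLD = (reg:SI 100) and NEW = (const_int 3) does not merely give
   (plus:SI (const_int 3) (const_int 1)) but the folded (const_int 4).  */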

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get the low order bit and then
	     its number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;
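
	  /* Worked example (added): for arg0 = 12 (binary 1100),
	     arg0 & -arg0 isolates the lowest set bit, 4; exact_log2 (4)
	     is 2, so FFS yields 3, the 1-based index of that bit.  */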

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;
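
	  /* Worked example (added): arg0 &= arg0 - 1 clears the lowest
	     set bit on each iteration (0b1010 -> 0b1000 -> 0), so the
	     loop runs once per set bit; POPCOUNT keeps the whole count
	     and PARITY keeps only its low bit.  */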

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;

	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  abort ();
	}
      return immed_double_const (xl, xh, mode);
    }
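
  /* Illustration (added): under these semantics, with 32-bit SImode a
     FIX of 1.0e10 exceeds the signed upper bound 0x7fffffff and is
     clamped to it, FIX of NaN folds to 0, and UNSIGNED_FIX of any
     negative value folds to 0.  */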

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);

	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
						   mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}

/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
	 || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
	return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
					 XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
	return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
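
/* Illustration (added): for integer MULT these rules rewrite
   (mult (mult x (const_int 4)) (const_int 2)) as
   (mult x (const_int 8)), and canonicalize
   (mult x (mult y (const_int 2))) as (mult (mult x y) (const_int 2)),
   so constants drift outward where later folds can combine them.  */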

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	return 0;

      if (code == DIV
	  && REAL_VALUES_EQUAL (f1, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:  case ASHIFTRT:
	case ASHIFT:
	case ROTATE:  case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a  */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
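
	  /* Worked example (added): X*3 + X has COEFF0 = 3 and
	     COEFF1 = 1, so it folds to X*4.  For X + X the folded form
	     X*2 would introduce a multiply that was not there before
	     (HAD_MULT stays 0), so it is kept only if simplify_gen_binary
	     already turned the multiply into a shift.  */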

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;

	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* (-x - c) may be simplified as (-c - x).  */
	  if (GET_CODE (op0) == NEG
	      && (GET_CODE (op1) == CONST_INT
		  || GET_CODE (op1) == CONST_DOUBLE))
	    {
	      tem = simplify_unary_operation (NEG, mode, op1, mode);
	      if (tem)
		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y)  */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;

	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x  */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;

	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1  */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0  */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

	  /* Fall through....  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));

	  /* Fall through....  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (mode
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL
		  || XVECLEN (trueop1, 0) != 1
		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
	    }
	  else
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (GET_MODE_INNER (mode)
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  if (XVECLEN (trueop1, 0) != (int) n_elts)
		    abort ();
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (GET_CODE (x) != CONST_INT)
			abort ();
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;
1959 case VEC_CONCAT:
1961 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1962 ? GET_MODE (trueop0)
1963 : GET_MODE_INNER (mode));
1964 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1965 ? GET_MODE (trueop1)
1966 : GET_MODE_INNER (mode));
1968 if (!VECTOR_MODE_P (mode)
1969 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1970 != GET_MODE_SIZE (mode)))
1971 abort ();
1973 if ((VECTOR_MODE_P (op0_mode)
1974 && (GET_MODE_INNER (mode)
1975 != GET_MODE_INNER (op0_mode)))
1976 || (!VECTOR_MODE_P (op0_mode)
1977 && GET_MODE_INNER (mode) != op0_mode))
1978 abort ();
1980 if ((VECTOR_MODE_P (op1_mode)
1981 && (GET_MODE_INNER (mode)
1982 != GET_MODE_INNER (op1_mode)))
1983 || (!VECTOR_MODE_P (op1_mode)
1984 && GET_MODE_INNER (mode) != op1_mode))
1985 abort ();
1987 if ((GET_CODE (trueop0) == CONST_VECTOR
1988 || GET_CODE (trueop0) == CONST_INT
1989 || GET_CODE (trueop0) == CONST_DOUBLE)
1990 && (GET_CODE (trueop1) == CONST_VECTOR
1991 || GET_CODE (trueop1) == CONST_INT
1992 || GET_CODE (trueop1) == CONST_DOUBLE))
1994 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1995 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1996 rtvec v = rtvec_alloc (n_elts);
1997 unsigned int i;
1998 unsigned in_n_elts = 1;
2000 if (VECTOR_MODE_P (op0_mode))
2001 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2002 for (i = 0; i < n_elts; i++)
2004 if (i < in_n_elts)
2006 if (!VECTOR_MODE_P (op0_mode))
2007 RTVEC_ELT (v, i) = trueop0;
2008 else
2009 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2011 else
2013 if (!VECTOR_MODE_P (op1_mode))
2014 RTVEC_ELT (v, i) = trueop1;
2015 else
2016 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2017 i - in_n_elts);
2021 return gen_rtx_CONST_VECTOR (mode, v);
2024 return 0;
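/* For illustration (hypothetical constants): concatenating two scalar
   constants yields a constant vector, e.g.
   (vec_concat:V2SI (const_int 1) (const_int 2))
   becomes (const_vector:V2SI [1 2]).  */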
2026 default:
2027 abort ();
2030 return 0;
2033 /* Get the integer argument values in two forms:
2034 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2036 arg0 = INTVAL (trueop0);
2037 arg1 = INTVAL (trueop1);
2039 if (width < HOST_BITS_PER_WIDE_INT)
2041 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2042 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2044 arg0s = arg0;
2045 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2046 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2048 arg1s = arg1;
2049 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2050 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2052 else
2054 arg0s = arg0;
2055 arg1s = arg1;
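/* As a worked example, with a hypothetical 8-bit mode and INTVAL 0xff:
   arg0 (zero-extended) is 255 while arg0s (sign-extended) is -1, so
   the unsigned cases below see 255 and the signed ones see -1.  */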
2058 /* Compute the value of the arithmetic. */
2060 switch (code)
2062 case PLUS:
2063 val = arg0s + arg1s;
2064 break;
2066 case MINUS:
2067 val = arg0s - arg1s;
2068 break;
2070 case MULT:
2071 val = arg0s * arg1s;
2072 break;
2074 case DIV:
2075 if (arg1s == 0
2076 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2077 && arg1s == -1))
2078 return 0;
2079 val = arg0s / arg1s;
2080 break;
2082 case MOD:
2083 if (arg1s == 0
2084 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2085 && arg1s == -1))
2086 return 0;
2087 val = arg0s % arg1s;
2088 break;
2090 case UDIV:
2091 if (arg1 == 0
2092 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2093 && arg1s == -1))
2094 return 0;
2095 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2096 break;
2098 case UMOD:
2099 if (arg1 == 0
2100 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2101 && arg1s == -1))
2102 return 0;
2103 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2104 break;
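/* Note the guards above decline to fold division by zero and the one
   overflowing case, the most negative HOST_WIDE_INT divided by -1;
   both are left unsimplified rather than evaluated.  */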
2106 case AND:
2107 val = arg0 & arg1;
2108 break;
2110 case IOR:
2111 val = arg0 | arg1;
2112 break;
2114 case XOR:
2115 val = arg0 ^ arg1;
2116 break;
2118 case LSHIFTRT:
2119 /* If shift count is undefined, don't fold it; let the machine do
2120 what it wants. But truncate it if the machine will do that. */
2121 if (arg1 < 0)
2122 return 0;
2124 #ifdef SHIFT_COUNT_TRUNCATED
2125 if (SHIFT_COUNT_TRUNCATED)
2126 arg1 %= width;
2127 #endif
2129 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2130 break;
2132 case ASHIFT:
2133 if (arg1 < 0)
2134 return 0;
2136 #ifdef SHIFT_COUNT_TRUNCATED
2137 if (SHIFT_COUNT_TRUNCATED)
2138 arg1 %= width;
2139 #endif
2141 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2142 break;
2144 case ASHIFTRT:
2145 if (arg1 < 0)
2146 return 0;
2148 #ifdef SHIFT_COUNT_TRUNCATED
2149 if (SHIFT_COUNT_TRUNCATED)
2150 arg1 %= width;
2151 #endif
2153 val = arg0s >> arg1;
2155 /* The bootstrap compiler may not have sign extended the right shift.
2156 Manually extend the sign to ensure bootstrap cc matches gcc. */
2157 if (arg0s < 0 && arg1 > 0)
2158 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2160 break;
2162 case ROTATERT:
2163 if (arg1 < 0)
2164 return 0;
2166 arg1 %= width;
2167 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2168 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2169 break;
2171 case ROTATE:
2172 if (arg1 < 0)
2173 return 0;
2175 arg1 %= width;
2176 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2177 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2178 break;
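/* Worked example with hypothetical 8-bit values: for
   (rotate (const_int 0x81) (const_int 1)),
   val = (0x81 << 1) | (0x81 >> 7) = 0x103, which trunc_int_for_mode
   below reduces to (const_int 3).  */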
2180 case COMPARE:
2181 /* Do nothing here. */
2182 return 0;
2184 case SMIN:
2185 val = arg0s <= arg1s ? arg0s : arg1s;
2186 break;
2188 case UMIN:
2189 val = ((unsigned HOST_WIDE_INT) arg0
2190 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2191 break;
2193 case SMAX:
2194 val = arg0s > arg1s ? arg0s : arg1s;
2195 break;
2197 case UMAX:
2198 val = ((unsigned HOST_WIDE_INT) arg0
2199 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2200 break;
2202 case SS_PLUS:
2203 case US_PLUS:
2204 case SS_MINUS:
2205 case US_MINUS:
2206 /* ??? There are simplifications that can be done. */
2207 return 0;
2209 default:
2210 abort ();
2213 val = trunc_int_for_mode (val, mode);
2215 return GEN_INT (val);
2218 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2219 PLUS or MINUS.
2221 Rather than test for specific cases, we do this by a brute-force method
2222 and do all possible simplifications until no more changes occur. Then
2223 we rebuild the operation.
2225 If FORCE is true, then always generate the rtx. This is used to
2226 canonicalize stuff emitted from simplify_gen_binary. Note that this
2227 can still fail if the rtx is too complex. It won't fail just because
2228 the result is not 'simpler' than the input, however. */
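/* For illustration (X and Y hypothetical): the expansion below flattens
   (plus (plus X (const_int 1)) (minus Y X))
   into the operands {X, Y, 1, -X}; X and -X then cancel in the
   pairwise pass, leaving (plus Y (const_int 1)).  */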
2230 struct simplify_plus_minus_op_data
2232 rtx op;
2233 int neg;
2236 static int
2237 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2239 const struct simplify_plus_minus_op_data *d1 = p1;
2240 const struct simplify_plus_minus_op_data *d2 = p2;
2242 return (commutative_operand_precedence (d2->op)
2243 - commutative_operand_precedence (d1->op));
2246 static rtx
2247 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2248 rtx op1, int force)
2250 struct simplify_plus_minus_op_data ops[8];
2251 rtx result, tem;
2252 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2253 int first, negate, changed;
2254 int i, j;
2256 memset (ops, 0, sizeof ops);
2258 /* Set up the two operands and then expand them until nothing has been
2259 changed. If we run out of room in our array, give up; this should
2260 almost never happen. */
2262 ops[0].op = op0;
2263 ops[0].neg = 0;
2264 ops[1].op = op1;
2265 ops[1].neg = (code == MINUS);
2269 changed = 0;
2271 for (i = 0; i < n_ops; i++)
2273 rtx this_op = ops[i].op;
2274 int this_neg = ops[i].neg;
2275 enum rtx_code this_code = GET_CODE (this_op);
2277 switch (this_code)
2279 case PLUS:
2280 case MINUS:
2281 if (n_ops == 7)
2282 return NULL_RTX;
2284 ops[n_ops].op = XEXP (this_op, 1);
2285 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2286 n_ops++;
2288 ops[i].op = XEXP (this_op, 0);
2289 input_ops++;
2290 changed = 1;
2291 break;
2293 case NEG:
2294 ops[i].op = XEXP (this_op, 0);
2295 ops[i].neg = ! this_neg;
2296 changed = 1;
2297 break;
2299 case CONST:
2300 if (n_ops < 7
2301 && GET_CODE (XEXP (this_op, 0)) == PLUS
2302 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2303 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2305 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2306 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2307 ops[n_ops].neg = this_neg;
2308 n_ops++;
2309 input_consts++;
2310 changed = 1;
2312 break;
2314 case NOT:
2315 /* ~a -> (-a - 1) */
2316 if (n_ops != 7)
2318 ops[n_ops].op = constm1_rtx;
2319 ops[n_ops++].neg = this_neg;
2320 ops[i].op = XEXP (this_op, 0);
2321 ops[i].neg = !this_neg;
2322 changed = 1;
2324 break;
2326 case CONST_INT:
2327 if (this_neg)
2329 ops[i].op = neg_const_int (mode, this_op);
2330 ops[i].neg = 0;
2331 changed = 1;
2333 break;
2335 default:
2336 break;
2340 while (changed);
2342 /* If we only have two operands, we can't do anything. */
2343 if (n_ops <= 2 && !force)
2344 return NULL_RTX;
2346 /* Count the number of CONSTs we didn't split above. */
2347 for (i = 0; i < n_ops; i++)
2348 if (GET_CODE (ops[i].op) == CONST)
2349 input_consts++;
2351 /* Now simplify each pair of operands until nothing changes. The first
2352 time through just simplify constants against each other. */
2354 first = 1;
2357 changed = first;
2359 for (i = 0; i < n_ops - 1; i++)
2360 for (j = i + 1; j < n_ops; j++)
2362 rtx lhs = ops[i].op, rhs = ops[j].op;
2363 int lneg = ops[i].neg, rneg = ops[j].neg;
2365 if (lhs != 0 && rhs != 0
2366 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2368 enum rtx_code ncode = PLUS;
2370 if (lneg != rneg)
2372 ncode = MINUS;
2373 if (lneg)
2374 tem = lhs, lhs = rhs, rhs = tem;
2376 else if (swap_commutative_operands_p (lhs, rhs))
2377 tem = lhs, lhs = rhs, rhs = tem;
2379 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2381 /* Reject "simplifications" that just wrap the two
2382 arguments in a CONST. Failure to do so can result
2383 in infinite recursion with simplify_binary_operation
2384 when it calls us to simplify CONST operations. */
2385 if (tem
2386 && ! (GET_CODE (tem) == CONST
2387 && GET_CODE (XEXP (tem, 0)) == ncode
2388 && XEXP (XEXP (tem, 0), 0) == lhs
2389 && XEXP (XEXP (tem, 0), 1) == rhs)
2390 /* Don't allow -x + -1 -> ~x simplifications in the
2391 first pass. This allows us the chance to combine
2392 the -1 with other constants. */
2393 && ! (first
2394 && GET_CODE (tem) == NOT
2395 && XEXP (tem, 0) == rhs))
2397 lneg &= rneg;
2398 if (GET_CODE (tem) == NEG)
2399 tem = XEXP (tem, 0), lneg = !lneg;
2400 if (GET_CODE (tem) == CONST_INT && lneg)
2401 tem = neg_const_int (mode, tem), lneg = 0;
2403 ops[i].op = tem;
2404 ops[i].neg = lneg;
2405 ops[j].op = NULL_RTX;
2406 changed = 1;
2411 first = 0;
2413 while (changed);
2415 /* Pack all the operands to the lower-numbered entries. */
2416 for (i = 0, j = 0; j < n_ops; j++)
2417 if (ops[j].op)
2418 ops[i++] = ops[j];
2419 n_ops = i;
2421 /* Sort the operations based on swap_commutative_operands_p. */
2422 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2424 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2425 if (n_ops == 2
2426 && GET_CODE (ops[1].op) == CONST_INT
2427 && CONSTANT_P (ops[0].op)
2428 && ops[0].neg)
2429 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2431 /* We suppressed creation of trivial CONST expressions in the
2432 combination loop to avoid recursion. Create one manually now.
2433 The combination loop should have ensured that there is exactly
2434 one CONST_INT, and the sort will have ensured that it is last
2435 in the array and that any other constant will be next-to-last. */
2437 if (n_ops > 1
2438 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2439 && CONSTANT_P (ops[n_ops - 2].op))
2441 rtx value = ops[n_ops - 1].op;
2442 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2443 value = neg_const_int (mode, value);
2444 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2445 n_ops--;
2448 /* Count the number of CONSTs that we generated. */
2449 n_consts = 0;
2450 for (i = 0; i < n_ops; i++)
2451 if (GET_CODE (ops[i].op) == CONST)
2452 n_consts++;
2454 /* Give up if we didn't reduce the number of operands we had. Make
2455 sure we count a CONST as two operands. If we have the same
2456 number of operands, but have made more CONSTs than before, this
2457 is also an improvement, so accept it. */
2458 if (!force
2459 && (n_ops + n_consts > input_ops
2460 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2461 return NULL_RTX;
2463 /* Put a non-negated operand first. If there aren't any, make all
2464 operands positive and negate the whole thing later. */
2466 negate = 0;
2467 for (i = 0; i < n_ops && ops[i].neg; i++)
2468 continue;
2469 if (i == n_ops)
2471 for (i = 0; i < n_ops; i++)
2472 ops[i].neg = 0;
2473 negate = 1;
2475 else if (i != 0)
2477 tem = ops[0].op;
2478 ops[0] = ops[i];
2479 ops[i].op = tem;
2480 ops[i].neg = 1;
2483 /* Now make the result by performing the requested operations. */
2484 result = ops[0].op;
2485 for (i = 1; i < n_ops; i++)
2486 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2487 mode, result, ops[i].op);
2489 return negate ? gen_rtx_NEG (mode, result) : result;
2492 /* Like simplify_binary_operation except used for relational operators.
2493 MODE is the mode of the operands, not that of the result. If MODE
2494 is VOIDmode, both operands must also be VOIDmode and we compare the
2495 operands in "infinite precision".
2497 If no simplification is possible, this function returns zero. Otherwise,
2498 it returns either const_true_rtx or const0_rtx. */
2501 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2502 rtx op0, rtx op1)
2504 int equal, op0lt, op0ltu, op1lt, op1ltu;
2505 rtx tem;
2506 rtx trueop0;
2507 rtx trueop1;
2509 if (mode == VOIDmode
2510 && (GET_MODE (op0) != VOIDmode
2511 || GET_MODE (op1) != VOIDmode))
2512 abort ();
2514 /* If op0 is a compare, extract the comparison arguments from it. */
2515 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2516 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2518 trueop0 = avoid_constant_pool_reference (op0);
2519 trueop1 = avoid_constant_pool_reference (op1);
2521 /* We can't simplify MODE_CC values since we don't know what the
2522 actual comparison is. */
2523 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2524 return 0;
2526 /* Make sure the constant is second. */
2527 if (swap_commutative_operands_p (trueop0, trueop1))
2529 tem = op0, op0 = op1, op1 = tem;
2530 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2531 code = swap_condition (code);
2534 /* For integer comparisons of A and B, maybe we can simplify A - B and
2535 then simplify a comparison of that with zero. If A and B are both either
2536 a register or a CONST_INT, this can't help; testing for these cases will
2537 prevent infinite recursion here and speed things up.
2539 If CODE is an unsigned comparison, then we can never do this optimization,
2540 because it gives an incorrect result if the subtraction wraps around zero.
2541 ANSI C defines unsigned operations such that they never overflow, and
2542 thus such cases cannot be ignored. */
2544 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2545 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2546 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2547 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2548 && code != GTU && code != GEU && code != LTU && code != LEU)
2549 return simplify_relational_operation (signed_condition (code),
2550 mode, tem, const0_rtx);
2552 if (flag_unsafe_math_optimizations && code == ORDERED)
2553 return const_true_rtx;
2555 if (flag_unsafe_math_optimizations && code == UNORDERED)
2556 return const0_rtx;
2558 /* For modes without NaNs, if the two operands are equal, we know the
2559 result except if they have side-effects. */
2560 if (! HONOR_NANS (GET_MODE (trueop0))
2561 && rtx_equal_p (trueop0, trueop1)
2562 && ! side_effects_p (trueop0))
2563 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2565 /* If the operands are floating-point constants, see if we can fold
2566 the result. */
2567 else if (GET_CODE (trueop0) == CONST_DOUBLE
2568 && GET_CODE (trueop1) == CONST_DOUBLE
2569 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2571 REAL_VALUE_TYPE d0, d1;
2573 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2574 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2576 /* Comparisons are unordered iff at least one of the values is NaN. */
2577 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2578 switch (code)
2580 case UNEQ:
2581 case UNLT:
2582 case UNGT:
2583 case UNLE:
2584 case UNGE:
2585 case NE:
2586 case UNORDERED:
2587 return const_true_rtx;
2588 case EQ:
2589 case LT:
2590 case GT:
2591 case LE:
2592 case GE:
2593 case LTGT:
2594 case ORDERED:
2595 return const0_rtx;
2596 default:
2597 return 0;
2600 equal = REAL_VALUES_EQUAL (d0, d1);
2601 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2602 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2605 /* Otherwise, see if the operands are both integers. */
2606 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2607 && (GET_CODE (trueop0) == CONST_DOUBLE
2608 || GET_CODE (trueop0) == CONST_INT)
2609 && (GET_CODE (trueop1) == CONST_DOUBLE
2610 || GET_CODE (trueop1) == CONST_INT))
2612 int width = GET_MODE_BITSIZE (mode);
2613 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2614 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2616 /* Get the two words comprising each integer constant. */
2617 if (GET_CODE (trueop0) == CONST_DOUBLE)
2619 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2620 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2622 else
2624 l0u = l0s = INTVAL (trueop0);
2625 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2628 if (GET_CODE (trueop1) == CONST_DOUBLE)
2630 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2631 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2633 else
2635 l1u = l1s = INTVAL (trueop1);
2636 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2639 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2640 we have to sign or zero-extend the values. */
2641 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2643 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2644 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2646 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2647 l0s |= ((HOST_WIDE_INT) (-1) << width);
2649 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2650 l1s |= ((HOST_WIDE_INT) (-1) << width);
2652 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2653 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2655 equal = (h0u == h1u && l0u == l1u);
2656 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2657 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2658 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2659 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2662 /* Otherwise, there are some code-specific tests we can make. */
2663 else
2665 switch (code)
2667 case EQ:
2668 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2669 return const0_rtx;
2670 break;
2672 case NE:
2673 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2674 return const_true_rtx;
2675 break;
2677 case GEU:
2678 /* Unsigned values are never negative. */
2679 if (trueop1 == const0_rtx)
2680 return const_true_rtx;
2681 break;
2683 case LTU:
2684 if (trueop1 == const0_rtx)
2685 return const0_rtx;
2686 break;
2688 case LEU:
2689 /* Unsigned values are never greater than the largest
2690 unsigned value. */
2691 if (GET_CODE (trueop1) == CONST_INT
2692 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2693 && INTEGRAL_MODE_P (mode))
2694 return const_true_rtx;
2695 break;
2697 case GTU:
2698 if (GET_CODE (trueop1) == CONST_INT
2699 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2700 && INTEGRAL_MODE_P (mode))
2701 return const0_rtx;
2702 break;
2704 case LT:
2705 /* Optimize abs(x) < 0.0. */
2706 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2708 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2709 : trueop0;
2710 if (GET_CODE (tem) == ABS)
2711 return const0_rtx;
2713 break;
2715 case GE:
2716 /* Optimize abs(x) >= 0.0. */
2717 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2719 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2720 : trueop0;
2721 if (GET_CODE (tem) == ABS)
2722 return const_true_rtx;
2724 break;
2726 case UNGE:
2727 /* Optimize ! (abs(x) < 0.0). */
2728 if (trueop1 == CONST0_RTX (mode))
2730 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2731 : trueop0;
2732 if (GET_CODE (tem) == ABS)
2733 return const_true_rtx;
2735 break;
2737 default:
2738 break;
2741 return 0;
2744 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2745 as appropriate. */
2746 switch (code)
2748 case EQ:
2749 case UNEQ:
2750 return equal ? const_true_rtx : const0_rtx;
2751 case NE:
2752 case LTGT:
2753 return ! equal ? const_true_rtx : const0_rtx;
2754 case LT:
2755 case UNLT:
2756 return op0lt ? const_true_rtx : const0_rtx;
2757 case GT:
2758 case UNGT:
2759 return op1lt ? const_true_rtx : const0_rtx;
2760 case LTU:
2761 return op0ltu ? const_true_rtx : const0_rtx;
2762 case GTU:
2763 return op1ltu ? const_true_rtx : const0_rtx;
2764 case LE:
2765 case UNLE:
2766 return equal || op0lt ? const_true_rtx : const0_rtx;
2767 case GE:
2768 case UNGE:
2769 return equal || op1lt ? const_true_rtx : const0_rtx;
2770 case LEU:
2771 return equal || op0ltu ? const_true_rtx : const0_rtx;
2772 case GEU:
2773 return equal || op1ltu ? const_true_rtx : const0_rtx;
2774 case ORDERED:
2775 return const_true_rtx;
2776 case UNORDERED:
2777 return const0_rtx;
2778 default:
2779 abort ();
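/* For illustration: with constant operands the trichotomy above folds
   directly, e.g. comparing CONST_INTs in "infinite precision",
   (lt (const_int -1) (const_int 0)) yields const_true_rtx while
   (ltu (const_int -1) (const_int 0)) yields const0_rtx, -1 being the
   largest unsigned value.  */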
2783 /* Simplify CODE, an operation with result mode MODE and three operands,
2784 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2785 a constant. Return 0 if no simplification is possible. */
2788 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2789 enum machine_mode op0_mode, rtx op0, rtx op1,
2790 rtx op2)
2792 unsigned int width = GET_MODE_BITSIZE (mode);
2794 /* VOIDmode means "infinite" precision. */
2795 if (width == 0)
2796 width = HOST_BITS_PER_WIDE_INT;
2798 switch (code)
2800 case SIGN_EXTRACT:
2801 case ZERO_EXTRACT:
2802 if (GET_CODE (op0) == CONST_INT
2803 && GET_CODE (op1) == CONST_INT
2804 && GET_CODE (op2) == CONST_INT
2805 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2806 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2808 /* Extracting a bit-field from a constant. */
2809 HOST_WIDE_INT val = INTVAL (op0);
2811 if (BITS_BIG_ENDIAN)
2812 val >>= (GET_MODE_BITSIZE (op0_mode)
2813 - INTVAL (op2) - INTVAL (op1));
2814 else
2815 val >>= INTVAL (op2);
2817 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2819 /* First zero-extend. */
2820 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2821 /* If desired, propagate sign bit. */
2822 if (code == SIGN_EXTRACT
2823 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2824 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2827 /* Clear the bits that don't belong in our mode,
2828 unless they and our sign bit are all one.
2829 So we get either a reasonable negative value or a reasonable
2830 unsigned value for this mode. */
2831 if (width < HOST_BITS_PER_WIDE_INT
2832 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2833 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2834 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2836 return GEN_INT (val);
2838 break;
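/* Worked example, hypothetical operands with BITS_BIG_ENDIAN false:
   extracting 3 bits starting at bit 2 of (const_int 28) gives
   28 >> 2 = 7 with all three bits set, so ZERO_EXTRACT yields
   (const_int 7) and SIGN_EXTRACT propagates the top bit to yield
   (const_int -1).  */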
2840 case IF_THEN_ELSE:
2841 if (GET_CODE (op0) == CONST_INT)
2842 return op0 != const0_rtx ? op1 : op2;
2844 /* Convert c ? a : a into "a". */
2845 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2846 return op1;
2848 /* Convert a != b ? a : b into "a". */
2849 if (GET_CODE (op0) == NE
2850 && ! side_effects_p (op0)
2851 && ! HONOR_NANS (mode)
2852 && ! HONOR_SIGNED_ZEROS (mode)
2853 && ((rtx_equal_p (XEXP (op0, 0), op1)
2854 && rtx_equal_p (XEXP (op0, 1), op2))
2855 || (rtx_equal_p (XEXP (op0, 0), op2)
2856 && rtx_equal_p (XEXP (op0, 1), op1))))
2857 return op1;
2859 /* Convert a == b ? a : b into "b". */
2860 if (GET_CODE (op0) == EQ
2861 && ! side_effects_p (op0)
2862 && ! HONOR_NANS (mode)
2863 && ! HONOR_SIGNED_ZEROS (mode)
2864 && ((rtx_equal_p (XEXP (op0, 0), op1)
2865 && rtx_equal_p (XEXP (op0, 1), op2))
2866 || (rtx_equal_p (XEXP (op0, 0), op2)
2867 && rtx_equal_p (XEXP (op0, 1), op1))))
2868 return op2;
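/* E.g. (if_then_else (ne X Y) X Y) and (if_then_else (eq X Y) Y X)
   both reduce to X under the checks above (X and Y hypothetical).  */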
2870 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2872 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2873 ? GET_MODE (XEXP (op0, 1))
2874 : GET_MODE (XEXP (op0, 0)));
2875 rtx temp;
2876 if (cmp_mode == VOIDmode)
2877 cmp_mode = op0_mode;
2878 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2879 XEXP (op0, 0), XEXP (op0, 1));
2881 /* See if any simplifications were possible. */
2882 if (temp == const0_rtx)
2883 return op2;
2884 else if (temp == const_true_rtx)
2885 return op1;
2886 else if (temp)
2887 abort ();
2889 /* Look for useful constants in op1 and op2: STORE_FLAG_VALUE and zero let us return the comparison itself, possibly reversed. */
2890 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2892 HOST_WIDE_INT t = INTVAL (op1);
2893 HOST_WIDE_INT f = INTVAL (op2);
2895 if (t == STORE_FLAG_VALUE && f == 0)
2896 code = GET_CODE (op0);
2897 else if (t == 0 && f == STORE_FLAG_VALUE)
2899 enum rtx_code tmp;
2900 tmp = reversed_comparison_code (op0, NULL_RTX);
2901 if (tmp == UNKNOWN)
2902 break;
2903 code = tmp;
2905 else
2906 break;
2908 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2911 break;
2913 case VEC_MERGE:
2914 if (GET_MODE (op0) != mode
2915 || GET_MODE (op1) != mode
2916 || !VECTOR_MODE_P (mode))
2917 abort ();
2918 op2 = avoid_constant_pool_reference (op2);
2919 if (GET_CODE (op2) == CONST_INT)
2921 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2922 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2923 int mask = (1 << n_elts) - 1;
2925 if (!(INTVAL (op2) & mask))
2926 return op1;
2927 if ((INTVAL (op2) & mask) == mask)
2928 return op0;
2930 op0 = avoid_constant_pool_reference (op0);
2931 op1 = avoid_constant_pool_reference (op1);
2932 if (GET_CODE (op0) == CONST_VECTOR
2933 && GET_CODE (op1) == CONST_VECTOR)
2935 rtvec v = rtvec_alloc (n_elts);
2936 unsigned int i;
2938 for (i = 0; i < n_elts; i++)
2939 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2940 ? CONST_VECTOR_ELT (op0, i)
2941 : CONST_VECTOR_ELT (op1, i));
2942 return gen_rtx_CONST_VECTOR (mode, v);
2945 break;
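/* For illustration (hypothetical vectors): with bit 0 of op2 selecting
   element 0 from op0 and bit 1 clear selecting element 1 from op1,
   (vec_merge:V2SI (const_vector [1 2]) (const_vector [3 4]) (const_int 1))
   becomes (const_vector:V2SI [1 4]).  */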
2947 default:
2948 abort ();
2951 return 0;
2954 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
2955 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
2957 Works by unpacking OP into a collection of 8-bit values
2958 represented as a little-endian array of 'unsigned char', selecting by BYTE,
2959 and then repacking them again for OUTERMODE. */
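/* For illustration (hypothetical 16-bit operand, little-endian target,
   8-bit units): (subreg:QI (const_int 0x1234) 0) unpacks into the byte
   array {0x34, 0x12}, selects byte 0, and repacks to (const_int 0x34).  */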
2961 static rtx
2962 simplify_immed_subreg (enum machine_mode outermode, rtx op,
2963 enum machine_mode innermode, unsigned int byte)
2965 /* We support up to 512-bit values (for V8DFmode). */
2966 enum {
2967 max_bitsize = 512,
2968 value_bit = 8,
2969 value_mask = (1 << value_bit) - 1
2971 unsigned char value[max_bitsize / value_bit];
2972 int value_start;
2973 int i;
2974 int elem;
2976 int num_elem;
2977 rtx * elems;
2978 int elem_bitsize;
2979 rtx result_s;
2980 rtvec result_v = NULL;
2981 enum mode_class outer_class;
2982 enum machine_mode outer_submode;
2984 /* Some ports misuse CCmode. */
2985 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
2986 return op;
2988 /* Unpack the value. */
2990 if (GET_CODE (op) == CONST_VECTOR)
2992 num_elem = CONST_VECTOR_NUNITS (op);
2993 elems = &CONST_VECTOR_ELT (op, 0);
2994 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
2996 else
2998 num_elem = 1;
2999 elems = &op;
3000 elem_bitsize = max_bitsize;
3003 if (BITS_PER_UNIT % value_bit != 0)
3004 abort (); /* Too complicated; reducing value_bit may help. */
3005 if (elem_bitsize % BITS_PER_UNIT != 0)
3006 abort (); /* I don't know how to handle endianness of sub-units. */
3008 for (elem = 0; elem < num_elem; elem++)
3010 unsigned char * vp;
3011 rtx el = elems[elem];
3013 /* Vectors are kept in target memory order. (This is probably
3014 a mistake.) */
3016 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3017 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3018 / BITS_PER_UNIT);
3019 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3020 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3021 unsigned bytele = (subword_byte % UNITS_PER_WORD
3022 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3023 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3026 switch (GET_CODE (el))
3028 case CONST_INT:
3029 for (i = 0;
3030 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3031 i += value_bit)
3032 *vp++ = INTVAL (el) >> i;
3033 /* CONST_INTs are always logically sign-extended. */
3034 for (; i < elem_bitsize; i += value_bit)
3035 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3036 break;
3038 case CONST_DOUBLE:
3039 if (GET_MODE (el) == VOIDmode)
3041 /* If this triggers, someone should have generated a
3042 CONST_INT instead. */
3043 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3044 abort ();
3046 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3047 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3048 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3050 *vp++ = CONST_DOUBLE_HIGH (el) >> i;
3051 i += value_bit;
3053 /* It shouldn't matter what's done here, so fill it with
3054 zero. */
3055 for (; i < max_bitsize; i += value_bit)
3056 *vp++ = 0;
3058 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3060 long tmp[max_bitsize / 32];
3061 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3063 if (bitsize > elem_bitsize)
3064 abort ();
3065 if (bitsize % value_bit != 0)
3066 abort ();
3068 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3069 GET_MODE (el));
3071 /* real_to_target produces its result in words affected by
3072 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3073 and use WORDS_BIG_ENDIAN instead; see the documentation
3074 of SUBREG in rtl.texi. */
3075 for (i = 0; i < bitsize; i += value_bit)
3077 int ibase;
3078 if (WORDS_BIG_ENDIAN)
3079 ibase = bitsize - 1 - i;
3080 else
3081 ibase = i;
3082 *vp++ = tmp[ibase / 32] >> i % 32;
3085 /* It shouldn't matter what's done here, so fill it with
3086 zero. */
3087 for (; i < elem_bitsize; i += value_bit)
3088 *vp++ = 0;
3090 else
3091 abort ();
3092 break;
3094 default:
3095 abort ();
3099 /* Now, pick the right byte to start with. */
3100 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3101 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3102 will already have offset 0. */
3103 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3105 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3106 - byte);
3107 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3108 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3109 byte = (subword_byte % UNITS_PER_WORD
3110 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3113 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3114 so if it's become negative it will instead be very large.) */
3115 if (byte >= GET_MODE_SIZE (innermode))
3116 abort ();
3118 /* Convert from bytes to chunks of size value_bit. */
3119 value_start = byte * (BITS_PER_UNIT / value_bit);
3121 /* Re-pack the value. */
3123 if (VECTOR_MODE_P (outermode))
3125 num_elem = GET_MODE_NUNITS (outermode);
3126 result_v = rtvec_alloc (num_elem);
3127 elems = &RTVEC_ELT (result_v, 0);
3128 outer_submode = GET_MODE_INNER (outermode);
3130 else
3132 num_elem = 1;
3133 elems = &result_s;
3134 outer_submode = outermode;
3137 outer_class = GET_MODE_CLASS (outer_submode);
3138 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3140 if (elem_bitsize % value_bit != 0)
3141 abort ();
3142 if (elem_bitsize + value_start * value_bit > max_bitsize)
3143 abort ();
3145 for (elem = 0; elem < num_elem; elem++)
3147 unsigned char *vp;
3149 /* Vectors are stored in target memory order. (This is probably
3150 a mistake.) */
3152 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3153 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3154 / BITS_PER_UNIT);
3155 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3156 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3157 unsigned bytele = (subword_byte % UNITS_PER_WORD
3158 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3159 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3162 switch (outer_class)
3164 case MODE_INT:
3165 case MODE_PARTIAL_INT:
3167 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3169 for (i = 0;
3170 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3171 i += value_bit)
3172 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3173 for (; i < elem_bitsize; i += value_bit)
3174 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3175 << (i - HOST_BITS_PER_WIDE_INT));
3177 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3178 know why. */
3179 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3180 elems[elem] = gen_int_mode (lo, outer_submode);
3181 else
3182 elems[elem] = immed_double_const (lo, hi, outer_submode);
3184 break;
3186 case MODE_FLOAT:
3188 REAL_VALUE_TYPE r;
3189 long tmp[max_bitsize / 32];
3191 /* real_from_target wants its input in words affected by
3192 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3193 and use WORDS_BIG_ENDIAN instead; see the documentation
3194 of SUBREG in rtl.texi. */
3195 for (i = 0; i < max_bitsize / 32; i++)
3196 tmp[i] = 0;
3197 for (i = 0; i < elem_bitsize; i += value_bit)
3199 int ibase;
3200 if (WORDS_BIG_ENDIAN)
3201 ibase = elem_bitsize - 1 - i;
3202 else
3203 ibase = i;
3204 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3207 real_from_target (&r, tmp, outer_submode);
3208 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3210 break;
3212 default:
3213 abort ();
3216 if (VECTOR_MODE_P (outermode))
3217 return gen_rtx_CONST_VECTOR (outermode, result_v);
3218 else
3219 return result_s;
3222 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3223 Return 0 if no simplifications are possible. */
3225 simplify_subreg (enum machine_mode outermode, rtx op,
3226 enum machine_mode innermode, unsigned int byte)
3228 /* Little bit of sanity checking. */
3229 if (innermode == VOIDmode || outermode == VOIDmode
3230 || innermode == BLKmode || outermode == BLKmode)
3231 abort ();
3233 if (GET_MODE (op) != innermode
3234 && GET_MODE (op) != VOIDmode)
3235 abort ();
3237 if (byte % GET_MODE_SIZE (outermode)
3238 || byte >= GET_MODE_SIZE (innermode))
3239 abort ();
3241 if (outermode == innermode && !byte)
3242 return op;
3244 if (GET_CODE (op) == CONST_INT
3245 || GET_CODE (op) == CONST_DOUBLE
3246 || GET_CODE (op) == CONST_VECTOR)
3247 return simplify_immed_subreg (outermode, op, innermode, byte);
3249 /* Changing mode twice with SUBREG => just change it once,
3250 or not at all if changing back to the op's starting mode. */
3251 if (GET_CODE (op) == SUBREG)
3253 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3254 int final_offset = byte + SUBREG_BYTE (op);
3255 rtx new;
3257 if (outermode == innermostmode
3258 && byte == 0 && SUBREG_BYTE (op) == 0)
3259 return SUBREG_REG (op);
3261 /* The SUBREG_BYTE represents the offset, as if the value were stored
3262 in memory. An irritating exception is the paradoxical subreg, where
3263 we define SUBREG_BYTE to be 0; on big-endian machines this value
3264 should be negative. For a moment, undo this exception. */
3265 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3267 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3268 if (WORDS_BIG_ENDIAN)
3269 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3270 if (BYTES_BIG_ENDIAN)
3271 final_offset += difference % UNITS_PER_WORD;
3273 if (SUBREG_BYTE (op) == 0
3274 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3276 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3277 if (WORDS_BIG_ENDIAN)
3278 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3279 if (BYTES_BIG_ENDIAN)
3280 final_offset += difference % UNITS_PER_WORD;
3283 /* See whether resulting subreg will be paradoxical. */
3284 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3286 /* In nonparadoxical subregs we can't handle negative offsets. */
3287 if (final_offset < 0)
3288 return NULL_RTX;
3289 /* Bail out in case resulting subreg would be incorrect. */
3290 if (final_offset % GET_MODE_SIZE (outermode)
3291 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3292 return NULL_RTX;
3294 else
3296 int offset = 0;
3297 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3299 /* In a paradoxical subreg, see if we are still looking at the lower
3300 part. If so, our SUBREG_BYTE will be 0. */
3301 if (WORDS_BIG_ENDIAN)
3302 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3303 if (BYTES_BIG_ENDIAN)
3304 offset += difference % UNITS_PER_WORD;
3305 if (offset == final_offset)
3306 final_offset = 0;
3307 else
3308 return NULL_RTX;
3311 /* Recurse for further possible simplifications. */
3312 new = simplify_subreg (outermode, SUBREG_REG (op),
3313 GET_MODE (SUBREG_REG (op)),
3314 final_offset);
3315 if (new)
3316 return new;
3317 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
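/* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
   (subreg:QI (reg:SI R) 0) on a little-endian target, while
   (subreg:SI (subreg:HI (reg:SI R) 0) 0) hands back the register
   itself (R hypothetical).  */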
3320 /* SUBREG of a hard register => just change the register number
3321 and/or mode. If the hard register is not valid in that mode,
3322 suppress this simplification. If the hard register is the stack,
3323 frame, or argument pointer, leave this as a SUBREG. */
3325 if (REG_P (op)
3326 && (! REG_FUNCTION_VALUE_P (op)
3327 || ! rtx_equal_function_value_matters)
3328 && REGNO (op) < FIRST_PSEUDO_REGISTER
3329 #ifdef CANNOT_CHANGE_MODE_CLASS
3330 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3331 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3332 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3333 #endif
3334 && ((reload_completed && !frame_pointer_needed)
3335 || (REGNO (op) != FRAME_POINTER_REGNUM
3336 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3337 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3338 #endif
3340 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3341 && REGNO (op) != ARG_POINTER_REGNUM
3342 #endif
3343 && REGNO (op) != STACK_POINTER_REGNUM
3344 && subreg_offset_representable_p (REGNO (op), innermode,
3345 byte, outermode))
3347 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3348 int final_regno = subreg_hard_regno (tem, 0);
3350 /* ??? We do allow it if the current REG is not valid for
3351 its mode. This is a kludge to work around how float/complex
3352 arguments are passed on 32-bit SPARC and should be fixed. */
3353 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3354 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3356 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3358 /* Propagate the original regno. We don't have any way to specify
3359 the offset inside the original regno, so do so only for the lowpart.
3360 The information is used only by alias analysis, which cannot
3361 grok partial registers anyway. */
3363 if (subreg_lowpart_offset (outermode, innermode) == byte)
3364 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3365 return x;
3369 /* If we have a SUBREG of a register that we are replacing and we are
3370 replacing it with a MEM, make a new MEM and try replacing the
3371 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3372 or if we would be widening it. */
3374 if (GET_CODE (op) == MEM
3375 && ! mode_dependent_address_p (XEXP (op, 0))
3376 /* Allow splitting of volatile memory references in case we don't
3377 have an instruction to move the whole thing. */
3378 && (! MEM_VOLATILE_P (op)
3379 || ! have_insn_for (SET, innermode))
3380 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3381 return adjust_address_nv (op, outermode, byte);
3383 /* Handle complex values represented as CONCAT
3384 of real and imaginary part. */
3385 if (GET_CODE (op) == CONCAT)
3387 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3388 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3389 unsigned int final_offset;
3390 rtx res;
3392 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3393 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3394 if (res)
3395 return res;
3396 /* We can at least simplify it by referring directly to the relevant part. */
3397 return gen_rtx_SUBREG (outermode, part, final_offset);
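/* E.g. for a complex value (concat:SC RE IM) with 4-byte parts,
   (subreg:SF ... 0) resolves to RE and (subreg:SF ... 4) to IM
   (RE and IM hypothetical).  */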
3400 return NULL_RTX;
3403 /* Make a SUBREG operation or equivalent if it folds. */
3406 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3407 enum machine_mode innermode, unsigned int byte)
3409 rtx new;
3410 /* Little bit of sanity checking. */
3411 if (innermode == VOIDmode || outermode == VOIDmode
3412 || innermode == BLKmode || outermode == BLKmode)
3413 abort ();
3415 if (GET_MODE (op) != innermode
3416 && GET_MODE (op) != VOIDmode)
3417 abort ();
3419 if (byte % GET_MODE_SIZE (outermode)
3420 || byte >= GET_MODE_SIZE (innermode))
3421 abort ();
3423 if (GET_CODE (op) == QUEUED)
3424 return NULL_RTX;
3426 new = simplify_subreg (outermode, op, innermode, byte);
3427 if (new)
3428 return new;
3430 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3431 return NULL_RTX;
3433 return gen_rtx_SUBREG (outermode, op, byte);
3435 /* Simplify X, an rtx expression.
3437 Return the simplified expression or NULL if no simplifications
3438 were possible.
3440 This is the preferred entry point into the simplification routines;
3441 however, we still allow passes to call the more specific routines.
3443 Right now GCC has three (yes, three) major bodies of RTL simplification
3444 code that need to be unified.
3446 1. fold_rtx in cse.c. This code uses various CSE specific
3447 information to aid in RTL simplification.
3449 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3450 it uses combine specific information to aid in RTL
3451 simplification.
3453 3. The routines in this file.
3456 Long term we want to only have one body of simplification code; to
3457 get to that state I recommend the following steps:
3459 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3460 which do not depend on pass-specific state into these routines.
3462 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3463 use this routine whenever possible.
3465 3. Allow for pass dependent state to be provided to these
3466 routines and add simplifications based on the pass dependent
3467 state. Remove code from cse.c & combine.c that becomes
3468 redundant/dead.
3470 It will take time, but ultimately the compiler will be easier to
3471 maintain and improve. It's totally silly that when we add a
3472 simplification it needs to be added to 4 places (3 for RTL
3473 simplification and 1 for tree simplification). */
3476 simplify_rtx (rtx x)
3478 enum rtx_code code = GET_CODE (x);
3479 enum machine_mode mode = GET_MODE (x);
3480 rtx temp;
3482 switch (GET_RTX_CLASS (code))
3484 case '1':
3485 return simplify_unary_operation (code, mode,
3486 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3487 case 'c':
3488 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3489 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3491 /* Fall through.... */
3493 case '2':
3494 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3496 case '3':
3497 case 'b':
3498 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3499 XEXP (x, 0), XEXP (x, 1),
3500 XEXP (x, 2));
3502 case '<':
3503 temp = simplify_relational_operation (code,
3504 ((GET_MODE (XEXP (x, 0))
3505 != VOIDmode)
3506 ? GET_MODE (XEXP (x, 0))
3507 : GET_MODE (XEXP (x, 1))),
3508 XEXP (x, 0), XEXP (x, 1));
3509 #ifdef FLOAT_STORE_FLAG_VALUE
3510 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3512 if (temp == const0_rtx)
3513 temp = CONST0_RTX (mode);
3514 else
3515 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3516 mode);
3518 #endif
3519 return temp;
3521 case 'x':
3522 if (code == SUBREG)
3523 return simplify_gen_subreg (mode, SUBREG_REG (x),
3524 GET_MODE (SUBREG_REG (x)),
3525 SUBREG_BYTE (x));
3526 if (code == CONSTANT_P_RTX)
3528 if (CONSTANT_P (XEXP (x, 0)))
3529 return const1_rtx;
3531 break;
3533 case 'o':
3534 if (code == LO_SUM)
3536 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3537 if (GET_CODE (XEXP (x, 0)) == HIGH
3538 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3539 return XEXP (x, 1);
3541 break;
3543 default:
3544 break;
3546 return NULL;
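/* For illustration: simplify_rtx on (plus:SI (const_int 2) (const_int 3))
   dispatches through the 'c'/'2' cases to simplify_binary_operation and
   yields (const_int 5).  */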