/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
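
/* Worked example (editorial comment, not in the original source): with
   a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (-2) yields -1 and
   HWI_SIGN_EXTEND (3) yields 0, so a (low, high) pair behaves like a
   single two's-complement value twice as wide as HOST_WIDE_INT.  */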
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
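
/* Illustrative use (editorial comment, not in the original source):
   simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds to X itself,
   since integer modes have no signed zeros; when nothing folds, the
   plain (plus:SI ...) rtx is generated rather than returning failure.  */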
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc.  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
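
/* Editorial note (not in the original source): the mode fixup above
   matters when, say, an SImode load reads a constant that was placed in
   the pool in SFmode; simplify_subreg then reinterprets the bits instead
   of forcing us to keep the MEM.  */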
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
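
/* Illustrative use (editorial comment, not in the original source):
   simplify_gen_unary (NOT, SImode, (not:SI x), SImode) folds straight
   back to X via the (not (not X)) rule in simplify_unary_operation
   below.  */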
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is itself a comparison, (ne (cmp) 0) is just (cmp), and
     (eq (cmp) 0) is the reversed comparison.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
	{
	  code = new;
	  mode = cmp_mode;
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	}
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
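
/* Illustrative fold (editorial comment, not in the original source):
   for (eq (ne:SI x 0) 0) the EQ branch above reverses the inner
   comparison, so the whole expression collapses to (eq:SI x 0).  */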
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	return
	  simplify_gen_relational (code, mode,
				   (op_mode != VOIDmode
				    ? op_mode
				    : GET_MODE (op0) != VOIDmode
				    ? GET_MODE (op0)
				    : GET_MODE (op1)),
				   op0, op1);
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    case 'o':
      if (code == MEM)
	return replace_equiv_address_nv (x,
					 simplify_replace_rtx (XEXP (x, 0),
							       old, new));
      else if (code == LO_SUM)
	{
	  rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}

      return x;

    default:
      return x;
    }
  return x;
}
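
/* Illustrative use (editorial comment, not in the original source):
   replacing register R with (const_int 0) in (plus:SI R (const_int 4))
   does not merely rebuild the PLUS; the rebuilt expression is fed back
   through simplify_gen_binary, so the result collapses to
   (const_int 4).  */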
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 == 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = GET_MODE_BITSIZE (mode);
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	    }
	  else
	    lv = exact_log2 (l1 & -l1);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;

	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      return gen_int_mode (i, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
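
/* Worked example (editorial comment, not in the original source):
   simplify_unary_operation (NOT, SImode, (const_int 5), SImode) takes
   the CONST_INT path above and returns (const_int -6), i.e. ~5
   truncated to SImode.  */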
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (code == DIV
	  && !MODE_HAS_INFINITIES (mode)
	  && REAL_VALUES_EQUAL (f1, dconst0))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* ... fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:  case ASHIFTRT:
	case ASHIFT:
	case ROTATE:  case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;

	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;

	case MULT:
	  if (trueop1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return gen_rtx_NEG (mode, op0);
	    }
	  break;

	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return gen_rtx_NOT (mode, op0);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
		}
	    }
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;
	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (mode
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL
		  || XVECLEN (trueop1, 0) != 1
		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
	    }
	  else
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (GET_MODE_INNER (mode)
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  if (XVECLEN (trueop1, 0) != (int) n_elts)
		    abort ();
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (GET_CODE (x) != CONST_INT)
			abort ();
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;
	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    if (!VECTOR_MODE_P (mode)
		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    != GET_MODE_SIZE (mode)))
	      abort ();

	    if ((VECTOR_MODE_P (op0_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op0_mode)))
		|| (!VECTOR_MODE_P (op0_mode)
		    && GET_MODE_INNER (mode) != op0_mode))
	      abort ();

	    if ((VECTOR_MODE_P (op1_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op1_mode)))
		|| (!VECTOR_MODE_P (op1_mode)
		    && GET_MODE_INNER (mode) != op1_mode))
	      abort ();

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }

	    return 0;
	  }

	default:
	  abort ();
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
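
/* Worked examples (editorial comment, not in the original source):
   (plus:SI (const_int 3) (const_int 4)) constant-folds to (const_int 7)
   in the switch above, while (minus:SI x x) simplifies to (const_int 0)
   for any side-effect-free X even though X is not constant.  */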
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
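
/* Editorial note (not in the original source): subtracting in this
   order sorts the operand array by decreasing
   commutative_operand_precedence, so complex operands float to the
   front and constants sink to the end, which the CONST-building code
   below relies on.  */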
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
2126 /* Like simplify_binary_operation except used for relational operators.
2127 MODE is the mode of the operands, not that of the result. If MODE
2128 is VOIDmode, both operands must also be VOIDmode and we compare the
2129 operands in "infinite precision".
2131 If no simplification is possible, this function returns zero. Otherwise,
2132 it returns either const_true_rtx or const0_rtx. */
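/* For instance, (eq (const_int 4) (const_int 4)) folds to
   const_true_rtx, while (ltu (reg) (const_int 0)) folds to const0_rtx,
   since no unsigned value is less than zero.  */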
2135 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2136 rtx op0, rtx op1)
2138 int equal, op0lt, op0ltu, op1lt, op1ltu;
2139 rtx tem;
2140 rtx trueop0;
2141 rtx trueop1;
2143 if (mode == VOIDmode
2144 && (GET_MODE (op0) != VOIDmode
2145 || GET_MODE (op1) != VOIDmode))
2146 abort ();
2148 /* If op0 is a compare, extract the comparison arguments from it. */
2149 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2150 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2152 trueop0 = avoid_constant_pool_reference (op0);
2153 trueop1 = avoid_constant_pool_reference (op1);
2155 /* We can't simplify MODE_CC values since we don't know what the
2156 actual comparison is. */
2157 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2158 return 0;
2160 /* Make sure the constant is second. */
2161 if (swap_commutative_operands_p (trueop0, trueop1))
2163 tem = op0, op0 = op1, op1 = tem;
2164 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2165 code = swap_condition (code);
2168 /* For integer comparisons of A and B maybe we can simplify A - B and can
2169 then simplify a comparison of that with zero. If A and B are both either
2170 a register or a CONST_INT, this can't help; testing for these cases will
2171 prevent infinite recursion here and speed things up.
2173 If CODE is an unsigned comparison, then we can never do this optimization,
2174 because it gives an incorrect result if the subtraction wraps around zero.
2175 ANSI C defines unsigned operations such that they never overflow, and
2176 thus such cases cannot be ignored. */
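/* Concretely: with 8-bit operands, 1 <u 255 is true, yet 1 - 255
   wraps to 2 and the rewritten signed test 2 < 0 would be false.  */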
2178 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2179 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2180 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2181 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2182 && code != GTU && code != GEU && code != LTU && code != LEU)
2183 return simplify_relational_operation (signed_condition (code),
2184 mode, tem, const0_rtx);
2186 if (flag_unsafe_math_optimizations && code == ORDERED)
2187 return const_true_rtx;
2189 if (flag_unsafe_math_optimizations && code == UNORDERED)
2190 return const0_rtx;
2192 /* For modes without NaNs, if the two operands are equal, we know the
2193 result unless they have side effects. */
2194 if (! HONOR_NANS (GET_MODE (trueop0))
2195 && rtx_equal_p (trueop0, trueop1)
2196 && ! side_effects_p (trueop0))
2197 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2199 /* If the operands are floating-point constants, see if we can fold
2200 the result. */
2201 else if (GET_CODE (trueop0) == CONST_DOUBLE
2202 && GET_CODE (trueop1) == CONST_DOUBLE
2203 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2205 REAL_VALUE_TYPE d0, d1;
2207 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2208 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2210 /* Comparisons are unordered iff at least one of the values is NaN. */
2211 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2212 switch (code)
2214 case UNEQ:
2215 case UNLT:
2216 case UNGT:
2217 case UNLE:
2218 case UNGE:
2219 case NE:
2220 case UNORDERED:
2221 return const_true_rtx;
2222 case EQ:
2223 case LT:
2224 case GT:
2225 case LE:
2226 case GE:
2227 case LTGT:
2228 case ORDERED:
2229 return const0_rtx;
2230 default:
2231 return 0;
2234 equal = REAL_VALUES_EQUAL (d0, d1);
2235 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2236 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2239 /* Otherwise, see if the operands are both integers. */
2240 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2241 && (GET_CODE (trueop0) == CONST_DOUBLE
2242 || GET_CODE (trueop0) == CONST_INT)
2243 && (GET_CODE (trueop1) == CONST_DOUBLE
2244 || GET_CODE (trueop1) == CONST_INT))
2246 int width = GET_MODE_BITSIZE (mode);
2247 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2248 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2250 /* Get the two words comprising each integer constant. */
2251 if (GET_CODE (trueop0) == CONST_DOUBLE)
2253 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2254 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2256 else
2258 l0u = l0s = INTVAL (trueop0);
2259 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2262 if (GET_CODE (trueop1) == CONST_DOUBLE)
2264 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2265 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2267 else
2269 l1u = l1s = INTVAL (trueop1);
2270 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2273 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2274 we have to sign or zero-extend the values. */
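/* E.g. for width 8, an input of 0xff leaves l0u == 255 but makes
   l0s == -1, so the signed and unsigned orderings computed below
   can differ, as they must.  */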
2275 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2277 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2278 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2280 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2281 l0s |= ((HOST_WIDE_INT) (-1) << width);
2283 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2284 l1s |= ((HOST_WIDE_INT) (-1) << width);
2286 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2287 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2289 equal = (h0u == h1u && l0u == l1u);
2290 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2291 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2292 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2293 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2296 /* Otherwise, there are some code-specific tests we can make. */
2297 else
2299 switch (code)
2301 case EQ:
2302 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2303 return const0_rtx;
2304 break;
2306 case NE:
2307 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2308 return const_true_rtx;
2309 break;
2311 case GEU:
2312 /* Unsigned values are never negative. */
2313 if (trueop1 == const0_rtx)
2314 return const_true_rtx;
2315 break;
2317 case LTU:
2318 if (trueop1 == const0_rtx)
2319 return const0_rtx;
2320 break;
2322 case LEU:
2323 /* Unsigned values are never greater than the largest
2324 unsigned value. */
2325 if (GET_CODE (trueop1) == CONST_INT
2326 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2327 && INTEGRAL_MODE_P (mode))
2328 return const_true_rtx;
2329 break;
2331 case GTU:
2332 if (GET_CODE (trueop1) == CONST_INT
2333 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2334 && INTEGRAL_MODE_P (mode))
2335 return const0_rtx;
2336 break;
2338 case LT:
2339 /* Optimize abs(x) < 0.0. */
2340 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2342 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2343 : trueop0;
2344 if (GET_CODE (tem) == ABS)
2345 return const0_rtx;
2347 break;
2349 case GE:
2350 /* Optimize abs(x) >= 0.0. */
2351 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2353 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2354 : trueop0;
2355 if (GET_CODE (tem) == ABS)
2356 return const_true_rtx;
2358 break;
2360 case UNGE:
2361 /* Optimize ! (abs(x) < 0.0). */
2362 if (trueop1 == CONST0_RTX (mode))
2364 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2365 : trueop0;
2366 if (GET_CODE (tem) == ABS)
2367 return const_true_rtx;
2369 break;
2371 default:
2372 break;
2375 return 0;
2378 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2379 as appropriate. */
2380 switch (code)
2382 case EQ:
2383 case UNEQ:
2384 return equal ? const_true_rtx : const0_rtx;
2385 case NE:
2386 case LTGT:
2387 return ! equal ? const_true_rtx : const0_rtx;
2388 case LT:
2389 case UNLT:
2390 return op0lt ? const_true_rtx : const0_rtx;
2391 case GT:
2392 case UNGT:
2393 return op1lt ? const_true_rtx : const0_rtx;
2394 case LTU:
2395 return op0ltu ? const_true_rtx : const0_rtx;
2396 case GTU:
2397 return op1ltu ? const_true_rtx : const0_rtx;
2398 case LE:
2399 case UNLE:
2400 return equal || op0lt ? const_true_rtx : const0_rtx;
2401 case GE:
2402 case UNGE:
2403 return equal || op1lt ? const_true_rtx : const0_rtx;
2404 case LEU:
2405 return equal || op0ltu ? const_true_rtx : const0_rtx;
2406 case GEU:
2407 return equal || op1ltu ? const_true_rtx : const0_rtx;
2408 case ORDERED:
2409 return const_true_rtx;
2410 case UNORDERED:
2411 return const0_rtx;
2412 default:
2413 abort ();
2417 /* Simplify CODE, an operation with result mode MODE and three operands,
2418 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2419 a constant. Return 0 if no simplification is possible. */
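/* For example, (if_then_else (const_int 1) A B) simplifies to A, and
   a VEC_MERGE whose mask selects only one source collapses to that
   source.  */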
2422 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2423 enum machine_mode op0_mode, rtx op0, rtx op1,
2424 rtx op2)
2426 unsigned int width = GET_MODE_BITSIZE (mode);
2428 /* VOIDmode means "infinite" precision. */
2429 if (width == 0)
2430 width = HOST_BITS_PER_WIDE_INT;
2432 switch (code)
2434 case SIGN_EXTRACT:
2435 case ZERO_EXTRACT:
2436 if (GET_CODE (op0) == CONST_INT
2437 && GET_CODE (op1) == CONST_INT
2438 && GET_CODE (op2) == CONST_INT
2439 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2440 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2442 /* Extracting a bit-field from a constant */
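/* Worked example, with BITS_BIG_ENDIAN false:
   (zero_extract (const_int 0x5a) (const_int 4) (const_int 0))
   extracts the low four bits, giving (const_int 10).  */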
2443 HOST_WIDE_INT val = INTVAL (op0);
2445 if (BITS_BIG_ENDIAN)
2446 val >>= (GET_MODE_BITSIZE (op0_mode)
2447 - INTVAL (op2) - INTVAL (op1));
2448 else
2449 val >>= INTVAL (op2);
2451 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2453 /* First zero-extend. */
2454 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2455 /* If desired, propagate sign bit. */
2456 if (code == SIGN_EXTRACT
2457 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2458 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2461 /* Clear the bits that don't belong in our mode,
2462 unless they and our sign bit are all one.
2463 So we get either a reasonable negative value or a reasonable
2464 unsigned value for this mode. */
2465 if (width < HOST_BITS_PER_WIDE_INT
2466 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2467 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2468 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2470 return GEN_INT (val);
2472 break;
2474 case IF_THEN_ELSE:
2475 if (GET_CODE (op0) == CONST_INT)
2476 return op0 != const0_rtx ? op1 : op2;
2478 /* Convert a == b ? b : a (and likewise a != b ? a : b) to "a". */
2479 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2480 && !HONOR_NANS (mode)
2481 && rtx_equal_p (XEXP (op0, 0), op1)
2482 && rtx_equal_p (XEXP (op0, 1), op2))
2483 return op1;
2484 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2485 && !HONOR_NANS (mode)
2486 && rtx_equal_p (XEXP (op0, 1), op1)
2487 && rtx_equal_p (XEXP (op0, 0), op2))
2488 return op2;
2489 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2491 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2492 ? GET_MODE (XEXP (op0, 1))
2493 : GET_MODE (XEXP (op0, 0)));
2494 rtx temp;
2495 if (cmp_mode == VOIDmode)
2496 cmp_mode = op0_mode;
2497 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2498 XEXP (op0, 0), XEXP (op0, 1));
2500 /* See if any simplifications were possible. */
2501 if (temp == const0_rtx)
2502 return op2;
2503 else if (temp == const1_rtx)
2504 return op1;
2505 else if (temp)
2506 op0 = temp;
2508 /* Look for happy constants in op1 and op2. */
2509 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2511 HOST_WIDE_INT t = INTVAL (op1);
2512 HOST_WIDE_INT f = INTVAL (op2);
2514 if (t == STORE_FLAG_VALUE && f == 0)
2515 code = GET_CODE (op0);
2516 else if (t == 0 && f == STORE_FLAG_VALUE)
2518 enum rtx_code tmp;
2519 tmp = reversed_comparison_code (op0, NULL_RTX);
2520 if (tmp == UNKNOWN)
2521 break;
2522 code = tmp;
2524 else
2525 break;
2527 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2530 break;
2531 case VEC_MERGE:
2532 if (GET_MODE (op0) != mode
2533 || GET_MODE (op1) != mode
2534 || !VECTOR_MODE_P (mode))
2535 abort ();
2536 op2 = avoid_constant_pool_reference (op2);
2537 if (GET_CODE (op2) == CONST_INT)
2539 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2540 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2541 int mask = (1 << n_elts) - 1;
2543 if (!(INTVAL (op2) & mask))
2544 return op1;
2545 if ((INTVAL (op2) & mask) == mask)
2546 return op0;
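/* E.g. for a four-element vector the mask is 0xf: an op2 of 0xf
   takes every element from op0, and an op2 of 0 takes them all
   from op1.  */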
2548 op0 = avoid_constant_pool_reference (op0);
2549 op1 = avoid_constant_pool_reference (op1);
2550 if (GET_CODE (op0) == CONST_VECTOR
2551 && GET_CODE (op1) == CONST_VECTOR)
2553 rtvec v = rtvec_alloc (n_elts);
2554 unsigned int i;
2556 for (i = 0; i < n_elts; i++)
2557 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2558 ? CONST_VECTOR_ELT (op0, i)
2559 : CONST_VECTOR_ELT (op1, i));
2560 return gen_rtx_CONST_VECTOR (mode, v);
2563 break;
2565 default:
2566 abort ();
2569 return 0;
2572 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2573 Return 0 if no simplification is possible. */
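/* For example, on a little-endian target the low byte of an SImode
   constant is exposed directly: (subreg:QI (const_int 0x1234) 0)
   simplifies to (const_int 0x34).  */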
2575 simplify_subreg (enum machine_mode outermode, rtx op,
2576 enum machine_mode innermode, unsigned int byte)
2578 /* Little bit of sanity checking. */
2579 if (innermode == VOIDmode || outermode == VOIDmode
2580 || innermode == BLKmode || outermode == BLKmode)
2581 abort ();
2583 if (GET_MODE (op) != innermode
2584 && GET_MODE (op) != VOIDmode)
2585 abort ();
2587 if (byte % GET_MODE_SIZE (outermode)
2588 || byte >= GET_MODE_SIZE (innermode))
2589 abort ();
2591 if (outermode == innermode && !byte)
2592 return op;
2594 /* Simplify subregs of vector constants. */
2595 if (GET_CODE (op) == CONST_VECTOR)
2597 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2598 const unsigned int offset = byte / elt_size;
2599 rtx elt;
2601 if (GET_MODE_INNER (innermode) == outermode)
2603 elt = CONST_VECTOR_ELT (op, offset);
2605 /* ?? We probably don't need this copy_rtx because constants
2606 can be shared. ?? */
2608 return copy_rtx (elt);
2610 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2611 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2613 return (gen_rtx_CONST_VECTOR
2614 (outermode,
2615 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2616 &CONST_VECTOR_ELT (op, offset))));
2618 else if (GET_MODE_CLASS (outermode) == MODE_INT
2619 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2621 /* This happens when the target register size is smaller than
2622 the vector mode, and we synthesize operations with vectors
2623 of elements that are smaller than the register size. */
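/* E.g. four QImode elements of a CONST_VECTOR can be packed into
   one SImode integer; BYTES_BIG_ENDIAN picks the traversal order
   used below.  */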
2624 HOST_WIDE_INT sum = 0, high = 0;
2625 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2626 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2627 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2628 int shift = BITS_PER_UNIT * elt_size;
2629 unsigned HOST_WIDE_INT unit_mask;
2631 unit_mask = (unsigned HOST_WIDE_INT) -1
2632 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2634 for (; n_elts--; i += step)
2636 elt = CONST_VECTOR_ELT (op, i);
2637 if (GET_CODE (elt) == CONST_DOUBLE
2638 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2640 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2641 elt);
2642 if (! elt)
2643 return NULL_RTX;
2645 if (GET_CODE (elt) != CONST_INT)
2646 return NULL_RTX;
2647 /* Avoid overflow. */
2648 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2649 return NULL_RTX;
2650 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2651 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2653 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2654 return GEN_INT (trunc_int_for_mode (sum, outermode));
2655 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2656 return immed_double_const (sum, high, outermode);
2657 else
2658 return NULL_RTX;
2660 else if (GET_MODE_CLASS (outermode) == MODE_INT
2661 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2663 enum machine_mode new_mode
2664 = int_mode_for_mode (GET_MODE_INNER (innermode));
2665 int subbyte = byte % elt_size;
2667 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2668 if (! op)
2669 return NULL_RTX;
2670 return simplify_subreg (outermode, op, new_mode, subbyte);
2672 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2673 /* This shouldn't happen, but let's not do anything stupid. */
2674 return NULL_RTX;
2677 /* Attempt to simplify constant to non-SUBREG expression. */
2678 if (CONSTANT_P (op))
2680 int offset, part;
2681 unsigned HOST_WIDE_INT val = 0;
2683 if (VECTOR_MODE_P (outermode))
2685 /* Construct a CONST_VECTOR from individual subregs. */
2686 enum machine_mode submode = GET_MODE_INNER (outermode);
2687 int subsize = GET_MODE_UNIT_SIZE (outermode);
2688 int i, elts = GET_MODE_NUNITS (outermode);
2689 rtvec v = rtvec_alloc (elts);
2690 rtx elt;
2692 for (i = 0; i < elts; i++, byte += subsize)
2694 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2695 /* ??? It would be nice if we could actually make such subregs
2696 on targets that allow such relocations. */
2697 if (byte >= GET_MODE_SIZE (innermode))
2698 elt = CONST0_RTX (submode);
2699 else
2700 elt = simplify_subreg (submode, op, innermode, byte);
2701 if (! elt)
2702 return NULL_RTX;
2703 RTVEC_ELT (v, i) = elt;
2705 return gen_rtx_CONST_VECTOR (outermode, v);
2708 /* ??? This code is partly redundant with code below, but can handle
2709 the subregs of floats and similar corner cases.
2710 Later we should move all simplification code here and rewrite
2711 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2712 using SIMPLIFY_SUBREG. */
2713 if (subreg_lowpart_offset (outermode, innermode) == byte
2714 && GET_CODE (op) != CONST_VECTOR)
2716 rtx new = gen_lowpart_if_possible (outermode, op);
2717 if (new)
2718 return new;
2721 /* Similar comments to those above apply here. */
2722 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2723 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2724 && GET_MODE_CLASS (outermode) == MODE_INT)
2726 rtx new = constant_subword (op,
2727 (byte / UNITS_PER_WORD),
2728 innermode);
2729 if (new)
2730 return new;
2733 if (GET_MODE_CLASS (outermode) != MODE_INT
2734 && GET_MODE_CLASS (outermode) != MODE_CC)
2736 enum machine_mode new_mode = int_mode_for_mode (outermode);
2738 if (new_mode != innermode || byte != 0)
2740 op = simplify_subreg (new_mode, op, innermode, byte);
2741 if (! op)
2742 return NULL_RTX;
2743 return simplify_subreg (outermode, op, new_mode, 0);
2747 offset = byte * BITS_PER_UNIT;
2748 switch (GET_CODE (op))
2750 case CONST_DOUBLE:
2751 if (GET_MODE (op) != VOIDmode)
2752 break;
2754 /* We can't handle this case yet. */
2755 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2756 return NULL_RTX;
2758 part = offset >= HOST_BITS_PER_WIDE_INT;
2759 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2760 && BYTES_BIG_ENDIAN)
2761 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2762 && WORDS_BIG_ENDIAN))
2763 part = !part;
2764 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2765 offset %= HOST_BITS_PER_WIDE_INT;
2767 /* We've already picked the word we want from a double, so
2768 pretend this is actually an integer. */
2769 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2771 /* FALLTHROUGH */
2772 case CONST_INT:
2773 if (GET_CODE (op) == CONST_INT)
2774 val = INTVAL (op);
2776 /* We don't handle synthesizing non-integral constants yet. */
2777 if (GET_MODE_CLASS (outermode) != MODE_INT)
2778 return NULL_RTX;
2780 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2782 if (WORDS_BIG_ENDIAN)
2783 offset = (GET_MODE_BITSIZE (innermode)
2784 - GET_MODE_BITSIZE (outermode) - offset);
2785 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2786 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2787 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2788 - 2 * (offset % BITS_PER_WORD));
2791 if (offset >= HOST_BITS_PER_WIDE_INT)
2792 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2793 else
2795 val >>= offset;
2796 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2797 val = trunc_int_for_mode (val, outermode);
2798 return GEN_INT (val);
2800 default:
2801 break;
2805 /* Changing mode twice with SUBREG => just change it once,
2806 or not at all if changing back to op's starting mode. */
2807 if (GET_CODE (op) == SUBREG)
2809 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2810 int final_offset = byte + SUBREG_BYTE (op);
2811 rtx new;
2813 if (outermode == innermostmode
2814 && byte == 0 && SUBREG_BYTE (op) == 0)
2815 return SUBREG_REG (op);
2817 /* The SUBREG_BYTE represents the offset, as if the value were stored
2818 in memory. An irritating exception is the paradoxical subreg, where
2819 we define SUBREG_BYTE to be 0. On big-endian machines, this
2820 value should be negative. For a moment, undo this exception. */
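/* E.g. on a big-endian machine, byte 0 of a paradoxical
   (subreg:DI (reg:SI) 0) really corresponds to a negative offset
   into the wider value, which the adjustment below restores.  */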
2821 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2823 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2824 if (WORDS_BIG_ENDIAN)
2825 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2826 if (BYTES_BIG_ENDIAN)
2827 final_offset += difference % UNITS_PER_WORD;
2829 if (SUBREG_BYTE (op) == 0
2830 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2832 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2833 if (WORDS_BIG_ENDIAN)
2834 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2835 if (BYTES_BIG_ENDIAN)
2836 final_offset += difference % UNITS_PER_WORD;
2839 /* See whether the resulting subreg will be paradoxical. */
2840 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2842 /* In nonparadoxical subregs we can't handle negative offsets. */
2843 if (final_offset < 0)
2844 return NULL_RTX;
2845 /* Bail out in case the resulting subreg would be incorrect. */
2846 if (final_offset % GET_MODE_SIZE (outermode)
2847 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2848 return NULL_RTX;
2850 else
2852 int offset = 0;
2853 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2855 /* In a paradoxical subreg, see if we are still looking at the lower
2856 part. If so, our SUBREG_BYTE will be 0. */
2857 if (WORDS_BIG_ENDIAN)
2858 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2859 if (BYTES_BIG_ENDIAN)
2860 offset += difference % UNITS_PER_WORD;
2861 if (offset == final_offset)
2862 final_offset = 0;
2863 else
2864 return NULL_RTX;
2867 /* Recurse for further possible simplifications. */
2868 new = simplify_subreg (outermode, SUBREG_REG (op),
2869 GET_MODE (SUBREG_REG (op)),
2870 final_offset);
2871 if (new)
2872 return new;
2873 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2876 /* SUBREG of a hard register => just change the register number
2877 and/or mode. If the hard register is not valid in that mode,
2878 suppress this simplification. If the hard register is the stack,
2879 frame, or argument pointer, leave this as a SUBREG. */
2881 if (REG_P (op)
2882 && (! REG_FUNCTION_VALUE_P (op)
2883 || ! rtx_equal_function_value_matters)
2884 && REGNO (op) < FIRST_PSEUDO_REGISTER
2885 #ifdef CANNOT_CHANGE_MODE_CLASS
2886 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2887 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2888 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2889 #endif
2890 && ((reload_completed && !frame_pointer_needed)
2891 || (REGNO (op) != FRAME_POINTER_REGNUM
2892 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2893 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2894 #endif
2896 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2897 && REGNO (op) != ARG_POINTER_REGNUM
2898 #endif
2899 && REGNO (op) != STACK_POINTER_REGNUM)
2901 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte), 1);
2904 /* ??? We do allow it if the current REG is not valid for
2905 its mode. This is a kludge to work around how float/complex
2906 arguments are passed on 32-bit SPARC and should be fixed. */
2907 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2908 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2910 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2912 /* Propagate the original regno. We don't have any way to specify
2913 the offset inside the original regno, so do so only for the lowpart.
2914 The information is used only by alias analysis, which cannot
2915 grok partial registers anyway. */
2917 if (subreg_lowpart_offset (outermode, innermode) == byte)
2918 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2919 return x;
2923 /* If we have a SUBREG of a register that we are replacing and we are
2924 replacing it with a MEM, make a new MEM and try replacing the
2925 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2926 or if we would be widening it. */
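/* E.g. (subreg:QI (mem:SI ADDR) 3) can be rewritten by
   adjust_address_nv as the narrower reference (mem:QI (plus ADDR
   (const_int 3))), provided ADDR is not mode-dependent.  */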
2928 if (GET_CODE (op) == MEM
2929 && ! mode_dependent_address_p (XEXP (op, 0))
2930 /* Allow splitting of volatile memory references in case we don't
2931 have an instruction to move the whole thing. */
2932 && (! MEM_VOLATILE_P (op)
2933 || ! have_insn_for (SET, innermode))
2934 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2935 return adjust_address_nv (op, outermode, byte);
2937 /* Handle complex values represented as CONCAT
2938 of real and imaginary part. */
2939 if (GET_CODE (op) == CONCAT)
2941 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2942 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2943 unsigned int final_offset;
2944 rtx res;
2946 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2947 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2948 if (res)
2949 return res;
2950 /* We can at least simplify it by referring directly to the relevant part. */
2951 return gen_rtx_SUBREG (outermode, part, final_offset);
2954 return NULL_RTX;
2956 /* Make a SUBREG operation or equivalent if it folds. */
2959 simplify_gen_subreg (enum machine_mode outermode, rtx op,
2960 enum machine_mode innermode, unsigned int byte)
2962 rtx new;
2963 /* Little bit of sanity checking. */
2964 if (innermode == VOIDmode || outermode == VOIDmode
2965 || innermode == BLKmode || outermode == BLKmode)
2966 abort ();
2968 if (GET_MODE (op) != innermode
2969 && GET_MODE (op) != VOIDmode)
2970 abort ();
2972 if (byte % GET_MODE_SIZE (outermode)
2973 || byte >= GET_MODE_SIZE (innermode))
2974 abort ();
2976 if (GET_CODE (op) == QUEUED)
2977 return NULL_RTX;
2979 new = simplify_subreg (outermode, op, innermode, byte);
2980 if (new)
2981 return new;
2983 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2984 return NULL_RTX;
2986 return gen_rtx_SUBREG (outermode, op, byte);
2988 /* Simplify X, an rtx expression.
2990 Return the simplified expression or NULL if no simplifications
2991 were possible.
2993 This is the preferred entry point into the simplification routines;
2994 however, we still allow passes to call the more specific routines.
2996 Right now GCC has three (yes, three) major bodies of RTL simplification
2997 code that need to be unified.
2999 1. fold_rtx in cse.c. This code uses various CSE specific
3000 information to aid in RTL simplification.
3002 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3003 it uses combine specific information to aid in RTL
3004 simplification.
3006 3. The routines in this file.
3009 Long term we want to only have one body of simplification code; to
3010 get to that state I recommend the following steps:
3012 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3013 which are not pass dependent state into these routines.
3015 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3016 use this routine whenever possible.
3018 3. Allow for pass dependent state to be provided to these
3019 routines and add simplifications based on the pass dependent
3020 state. Remove code from cse.c & combine.c that becomes
3021 redundant/dead.
3023 It will take time, but ultimately the compiler will be easier to
3024 maintain and improve. It's totally silly that when we add a
3025 simplification it needs to be added to 4 places (3 for RTL
3026 simplification and 1 for tree simplification). */
3029 simplify_rtx (rtx x)
3031 enum rtx_code code = GET_CODE (x);
3032 enum machine_mode mode = GET_MODE (x);
3034 switch (GET_RTX_CLASS (code))
3036 case '1':
3037 return simplify_unary_operation (code, mode,
3038 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3039 case 'c':
3040 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3042 rtx tem;
3044 tem = XEXP (x, 0);
3045 XEXP (x, 0) = XEXP (x, 1);
3046 XEXP (x, 1) = tem;
3047 return simplify_binary_operation (code, mode,
3048 XEXP (x, 0), XEXP (x, 1));
3051 case '2':
3052 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3054 case '3':
3055 case 'b':
3056 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3057 XEXP (x, 0), XEXP (x, 1),
3058 XEXP (x, 2));
3060 case '<':
3061 return simplify_relational_operation (code,
3062 ((GET_MODE (XEXP (x, 0))
3063 != VOIDmode)
3064 ? GET_MODE (XEXP (x, 0))
3065 : GET_MODE (XEXP (x, 1))),
3066 XEXP (x, 0), XEXP (x, 1));
3067 case 'x':
3068 if (code == SUBREG)
3069 return simplify_gen_subreg (mode, SUBREG_REG (x),
3070 GET_MODE (SUBREG_REG (x)),
3071 SUBREG_BYTE (x));
3072 if (code == CONSTANT_P_RTX)
3074 if (CONSTANT_P (XEXP (x, 0)))
3075 return const1_rtx;
3077 return NULL;
3078 default:
3079 return NULL;