/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
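
/* For example, with a 32-bit HOST_WIDE_INT the 64-bit value -5 is
   represented by the pair (low = 0xfffffffb, high = -1); since the low
   word is negative when viewed as signed, HWI_SIGN_EXTEND (low) yields
   (HOST_WIDE_INT) -1, reconstructing the high word.  For low = 7 it
   yields 0, giving the pair (7, 0).  */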
static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
                                                    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
                                        enum machine_mode, rtx,
                                        rtx, int));
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return gen_int_mode (- INTVAL (i), mode);
}
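
/* The truncation matters: in QImode the maximally negative value is
   -128, and - INTVAL gives +128, which does not fit in the mode;
   gen_int_mode wraps it back to -128, so (neg:QI (const_int -128))
   folds to (const_int -128), matching two's-complement negation.  */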
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
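
/* A hypothetical caller might write
     simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   and get X back directly, while
     simplify_gen_binary (PLUS, SImode, const0_rtx, x)
   first swaps the operands into canonical order (constant second) and
   then folds the same way.  */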
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc...  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
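
/* As an illustration (hypothetical pool layout): if X is a
   (mem (symbol_ref)) addressing a pool entry that holds the SFmode
   constant 1.0, this returns the CONST_DOUBLE for 1.0, and an
   enclosing (float_extend:DF (mem ...)) of the same entry is folded
   to a DFmode CONST_DOUBLE by the FLOAT_EXTEND case above.  */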
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
        {
          code = new;
          mode = cmp_mode;
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
        }
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
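
/* For example, (ne (eq x y) (const_int 0)) collapses to (eq x y) by the
   NE test above, and (eq (lt x y) (const_int 0)) becomes (ge x y)
   whenever the comparison is reversible.  */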
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

        return
          simplify_gen_relational (code, mode,
                                   (op_mode != VOIDmode
                                    ? op_mode
                                    : GET_MODE (op0) != VOIDmode
                                    ? GET_MODE (op0)
                                    : GET_MODE (op1)),
                                   op0, op1);
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    case 'o':
      if (code == MEM)
        return replace_equiv_address_nv (x,
                                         simplify_replace_rtx (XEXP (x, 0),
                                                               old, new));
      else if (code == LO_SUM)
        {
          rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }

      return x;

    default:
      return x;
    }
  return x;
}
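
/* As an illustration, replacing OLD = (reg 100) with NEW = (const_int 0)
   in X = (plus:SI (reg 100) (reg 101)) recurses into both operands and
   then re-simplifies, so the result is just (reg 101) rather than
   (plus:SI (const_int 0) (reg 101)).  */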
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;
      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;
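
          /* E.g. for arg0 = 0b10100, arg0 & -arg0 isolates the lowest
             set bit (0b100), exact_log2 gives 2, and adding 1 yields
             FFS = 3.  For arg0 = 0 the AND is 0, exact_log2 returns -1,
             and the result is 0.  */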
        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          val = arg0 == 0
                ? GET_MODE_BITSIZE (mode)
                : exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;
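
          /* Both counts above rely on arg0 & (arg0 - 1) clearing the
             lowest set bit, so each loop iteration retires exactly one
             set bit; e.g. 0b1011 -> 0b1010 -> 0b1000 -> 0 takes three
             iterations, giving POPCOUNT 3 and hence PARITY 1.  */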
        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 == 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          break;

        case CTZ:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = GET_MODE_BITSIZE (mode);
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
            }
          else
            lv = exact_log2 (l1 & -l1);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;

        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
        {
        case FIX:          i = REAL_VALUE_FIX (d);          break;
        case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
        default:
          abort ();
        }
      return gen_int_mode (i, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return gen_rtx_fmt_ee (reversed,
                                   op_mode, XEXP (op, 0), XEXP (op, 1));
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (code == DIV
          && !MODE_HAS_INFINITIES (mode)
          && REAL_VALUES_EQUAL (f1, dconst0))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* .. fall through ...  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:  case ASHIFTRT:
        case ASHIFT:
        case ROTATE:    case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
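
  /* The (low, high) pairs above follow the file-wide convention: on a
     32-bit host the DImode value 2^32 + 1 is the pair (l = 1, h = 1),
     and e.g. add_double computes the two-word sum with carry, so that
     (1, 1) + (0xffffffff, 0) yields (0, 2), i.e. 2^33.  */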
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return gen_rtx_NEG (mode, XEXP (op0, 0));

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
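
          /* In the block above, e.g. (plus (mult x 4) x) arrives with
             coeff0 = 4, coeff1 = 1 and lhs == rhs == x, and folds to
             (mult x 5).  When no real MULT was present (say both
             operands were shifts), a result that is still a MULT is
             rejected by the had_mult test, so no new multiply is
             introduced.  */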
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;
        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return gen_rtx_NEG (mode, op1);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return gen_rtx_NOT (mode, op1);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 1)));
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 0)));
            }
          break;
        case MULT:
          if (trueop1 == constm1_rtx)
            {
              tem = simplify_unary_operation (NEG, mode, op0, mode);

              return tem ? tem : gen_rtx_NEG (mode, op0);
            }

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return gen_rtx_NEG (mode, op0);
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return gen_rtx_NOT (mode, op0);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

          /* ... fall through ...  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  return gen_rtx_MULT (mode, op0,
                                       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
                }
            }
          break;
        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

          /* ... fall through ...  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* ... fall through ...  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          abort ();
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;
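
      /* Concretely: if the host's >> on a negative HOST_WIDE_INT were a
         logical shift, -8 >> 1 would yield a huge positive value;
         OR-ing ones into the top ARG1 bits restores the expected
         arithmetic result, -4.  */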
    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
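
/* As a sketch of the brute-force method, for an integral mode: given
   (minus (plus a b) (plus a c)), the expansion loop below flattens the
   operands into the signed list {+a, -a, +b, -c}; the pairwise
   combination loop then simplifies a - a to 0 and absorbs the 0, and
   the rebuild emits (minus b c).  */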
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
                                          mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result.  */
  if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
2101 /* Otherwise, there are some code-specific tests we can make. */
2102 else
2104 switch (code)
2105 {
2106 case EQ:
2107 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2108 return const0_rtx;
2109 break;
2111 case NE:
2112 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2113 return const_true_rtx;
2114 break;
2116 case GEU:
2117 /* Unsigned values are never negative. */
2118 if (trueop1 == const0_rtx)
2119 return const_true_rtx;
2120 break;
2122 case LTU:
2123 if (trueop1 == const0_rtx)
2124 return const0_rtx;
2125 break;
2127 case LEU:
2128 /* Unsigned values are never greater than the largest
2129 unsigned value. */
2130 if (GET_CODE (trueop1) == CONST_INT
2131 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2132 && INTEGRAL_MODE_P (mode))
2133 return const_true_rtx;
2134 break;
2136 case GTU:
2137 if (GET_CODE (trueop1) == CONST_INT
2138 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2139 && INTEGRAL_MODE_P (mode))
2140 return const0_rtx;
2141 break;
2143 case LT:
2144 /* Optimize abs(x) < 0.0. */
2145 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2147 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2148 : trueop0;
2149 if (GET_CODE (tem) == ABS)
2150 return const0_rtx;
2152 break;
2154 case GE:
2155 /* Optimize abs(x) >= 0.0. */
2156 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2158 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2159 : trueop0;
2160 if (GET_CODE (tem) == ABS)
2161 return const1_rtx;
2163 break;
2165 default:
2166 break;
2167 }
2169 return 0;
2172 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2173 as appropriate. */
2174 switch (code)
2175 {
2176 case EQ:
2177 case UNEQ:
2178 return equal ? const_true_rtx : const0_rtx;
2179 case NE:
2180 case LTGT:
2181 return ! equal ? const_true_rtx : const0_rtx;
2182 case LT:
2183 case UNLT:
2184 return op0lt ? const_true_rtx : const0_rtx;
2185 case GT:
2186 case UNGT:
2187 return op1lt ? const_true_rtx : const0_rtx;
2188 case LTU:
2189 return op0ltu ? const_true_rtx : const0_rtx;
2190 case GTU:
2191 return op1ltu ? const_true_rtx : const0_rtx;
2192 case LE:
2193 case UNLE:
2194 return equal || op0lt ? const_true_rtx : const0_rtx;
2195 case GE:
2196 case UNGE:
2197 return equal || op1lt ? const_true_rtx : const0_rtx;
2198 case LEU:
2199 return equal || op0ltu ? const_true_rtx : const0_rtx;
2200 case GEU:
2201 return equal || op1ltu ? const_true_rtx : const0_rtx;
2202 case ORDERED:
2203 return const_true_rtx;
2204 case UNORDERED:
2205 return const0_rtx;
2206 default:
2207 abort ();
2208 }
2209 }
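/* Illustrative sketch only (editorial addition, not part of the original
   file and not called anywhere): the double-word comparisons above order
   (high, low) pairs by comparing the high words first and falling back to
   an unsigned comparison of the low words on a tie.  The signed variant
   is shown; the unsigned one differs only in the type of the high words.  */

static int example_double_word_signed_less
  PARAMS ((HOST_WIDE_INT, unsigned HOST_WIDE_INT,
	   HOST_WIDE_INT, unsigned HOST_WIDE_INT)) ATTRIBUTE_UNUSED;

static int
example_double_word_signed_less (h0, l0, h1, l1)
     HOST_WIDE_INT h0, h1;
     unsigned HOST_WIDE_INT l0, l1;
{
  /* The sign lives in the high word, so it decides the order; the low
     words only matter on a tie and are always compared unsigned.  */
  return h0 < h1 || (h0 == h1 && l0 < l1);
}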
2211 /* Simplify CODE, an operation with result mode MODE and three operands,
2212 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2213 a constant.  Return 0 if no simplification is possible.  */
2215 rtx
2216 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2217 enum rtx_code code;
2218 enum machine_mode mode, op0_mode;
2219 rtx op0, op1, op2;
2220 {
2221 unsigned int width = GET_MODE_BITSIZE (mode);
2223 /* VOIDmode means "infinite" precision. */
2224 if (width == 0)
2225 width = HOST_BITS_PER_WIDE_INT;
2227 switch (code)
2228 {
2229 case SIGN_EXTRACT:
2230 case ZERO_EXTRACT:
2231 if (GET_CODE (op0) == CONST_INT
2232 && GET_CODE (op1) == CONST_INT
2233 && GET_CODE (op2) == CONST_INT
2234 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2235 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2237 /* Extracting a bit-field from a constant */
2238 HOST_WIDE_INT val = INTVAL (op0);
2240 if (BITS_BIG_ENDIAN)
2241 val >>= (GET_MODE_BITSIZE (op0_mode)
2242 - INTVAL (op2) - INTVAL (op1));
2243 else
2244 val >>= INTVAL (op2);
2246 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2248 /* First zero-extend. */
2249 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2250 /* If desired, propagate sign bit. */
2251 if (code == SIGN_EXTRACT
2252 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2253 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2256 /* Clear the bits that don't belong in our mode,
2257 unless they and our sign bit are all one.
2258 So we get either a reasonable negative value or a reasonable
2259 unsigned value for this mode. */
2260 if (width < HOST_BITS_PER_WIDE_INT
2261 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2262 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2263 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2265 return GEN_INT (val);
2267 break;
2269 case IF_THEN_ELSE:
2270 if (GET_CODE (op0) == CONST_INT)
2271 return op0 != const0_rtx ? op1 : op2;
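/* For example (editorial illustration), (if_then_else (const_int 1) A B)
   simplifies to A, and (if_then_else (const_int 0) A B) to B.  */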
2273 /* Convert a == b ? b : a to "a". */
2274 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2275 && !HONOR_NANS (mode)
2276 && rtx_equal_p (XEXP (op0, 0), op1)
2277 && rtx_equal_p (XEXP (op0, 1), op2))
2278 return op1;
2279 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2280 && !HONOR_NANS (mode)
2281 && rtx_equal_p (XEXP (op0, 1), op1)
2282 && rtx_equal_p (XEXP (op0, 0), op2))
2283 return op2;
2284 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2286 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2287 ? GET_MODE (XEXP (op0, 1))
2288 : GET_MODE (XEXP (op0, 0)));
2289 rtx temp;
2290 if (cmp_mode == VOIDmode)
2291 cmp_mode = op0_mode;
2292 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2293 XEXP (op0, 0), XEXP (op0, 1));
2295 /* See if any simplifications were possible. */
2296 if (temp == const0_rtx)
2297 return op2;
2298 else if (temp == const1_rtx)
2299 return op1;
2300 else if (temp)
2301 op0 = temp;
2303 /* Look for convenient constants in op1 and op2: STORE_FLAG_VALUE and zero let us return the comparison itself, possibly reversed.  */
2304 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2306 HOST_WIDE_INT t = INTVAL (op1);
2307 HOST_WIDE_INT f = INTVAL (op2);
2309 if (t == STORE_FLAG_VALUE && f == 0)
2310 code = GET_CODE (op0);
2311 else if (t == 0 && f == STORE_FLAG_VALUE)
2313 enum rtx_code tmp;
2314 tmp = reversed_comparison_code (op0, NULL_RTX);
2315 if (tmp == UNKNOWN)
2316 break;
2317 code = tmp;
2319 else
2320 break;
2322 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2325 break;
2327 default:
2328 abort ();
2329 }
2331 return 0;
2332 }
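/* Illustrative sketch only (editorial addition, not called anywhere):
   the SIGN_EXTRACT constant folding above amounts to the shift/mask
   sequence below, assuming little-endian bit numbering and
   0 < LEN < HOST_BITS_PER_WIDE_INT.  */

static HOST_WIDE_INT example_sign_extract
  PARAMS ((HOST_WIDE_INT, int, int)) ATTRIBUTE_UNUSED;

static HOST_WIDE_INT
example_sign_extract (val, len, pos)
     HOST_WIDE_INT val;
     int len, pos;
{
  /* Drop the bits below the field, then zero-extend to LEN bits.  */
  val >>= pos;
  val &= ((HOST_WIDE_INT) 1 << len) - 1;

  /* If the field's top bit is set, replicate it upwards.  */
  if (val & ((HOST_WIDE_INT) 1 << (len - 1)))
    val |= ~(((HOST_WIDE_INT) 1 << len) - 1);
  return val;
}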
2334 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2335 Return 0 if no simplification is possible.  */
2336 rtx
2337 simplify_subreg (outermode, op, innermode, byte)
2338 rtx op;
2339 unsigned int byte;
2340 enum machine_mode outermode, innermode;
2341 {
2342 /* Little bit of sanity checking. */
2343 if (innermode == VOIDmode || outermode == VOIDmode
2344 || innermode == BLKmode || outermode == BLKmode)
2345 abort ();
2347 if (GET_MODE (op) != innermode
2348 && GET_MODE (op) != VOIDmode)
2349 abort ();
2351 if (byte % GET_MODE_SIZE (outermode)
2352 || byte >= GET_MODE_SIZE (innermode))
2353 abort ();
2355 if (outermode == innermode && !byte)
2356 return op;
2358 /* Simplify subregs of vector constants. */
2359 if (GET_CODE (op) == CONST_VECTOR)
2361 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2362 const unsigned int offset = byte / elt_size;
2363 rtx elt;
2365 if (GET_MODE_INNER (innermode) == outermode)
2367 elt = CONST_VECTOR_ELT (op, offset);
2369 /* ?? We probably don't need this copy_rtx because constants
2370 can be shared. ?? */
2372 return copy_rtx (elt);
2374 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2375 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2377 return (gen_rtx_CONST_VECTOR
2378 (outermode,
2379 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2380 &CONST_VECTOR_ELT (op, offset))));
2382 else if (GET_MODE_CLASS (outermode) == MODE_INT
2383 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2385 /* This happens when the target register size is smaller than
2386 the vector mode, and we synthesize operations with vectors
2387 of elements that are smaller than the register size. */
2388 HOST_WIDE_INT sum = 0, high = 0;
2389 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2390 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2391 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2392 int shift = BITS_PER_UNIT * elt_size;
2394 for (; n_elts--; i += step)
2396 elt = CONST_VECTOR_ELT (op, i);
2397 if (GET_CODE (elt) == CONST_DOUBLE
2398 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2400 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2401 elt);
2402 if (! elt)
2403 return NULL_RTX;
2405 if (GET_CODE (elt) != CONST_INT)
2406 return NULL_RTX;
2407 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2408 sum = (sum << shift) + INTVAL (elt);
2410 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2411 return GEN_INT (trunc_int_for_mode (sum, outermode));
2412 else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2413 return immed_double_const (high, sum, outermode);
2414 else
2415 return NULL_RTX;
2417 else if (GET_MODE_CLASS (outermode) == MODE_INT
2418 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2420 enum machine_mode new_mode
2421 = int_mode_for_mode (GET_MODE_INNER (innermode));
2422 int subbyte = byte % elt_size;
2424 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2425 if (! op)
2426 return NULL_RTX;
2427 return simplify_subreg (outermode, op, new_mode, subbyte);
2429 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2430 /* This shouldn't happen, but let's not do anything stupid. */
2431 return NULL_RTX;
2434 /* Attempt to simplify constant to non-SUBREG expression. */
2435 if (CONSTANT_P (op))
2437 int offset, part;
2438 unsigned HOST_WIDE_INT val = 0;
2440 if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
2441 || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
2443 /* Construct a CONST_VECTOR from individual subregs. */
2444 enum machine_mode submode = GET_MODE_INNER (outermode);
2445 int subsize = GET_MODE_UNIT_SIZE (outermode);
2446 int i, elts = GET_MODE_NUNITS (outermode);
2447 rtvec v = rtvec_alloc (elts);
2448 rtx elt;
2450 for (i = 0; i < elts; i++, byte += subsize)
2452 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2453 /* ??? It would be nice if we could actually make such subregs
2454 on targets that allow such relocations. */
2455 if (byte >= GET_MODE_UNIT_SIZE (innermode))
2456 elt = CONST0_RTX (submode);
2457 else
2458 elt = simplify_subreg (submode, op, innermode, byte);
2459 if (! elt)
2460 return NULL_RTX;
2461 RTVEC_ELT (v, i) = elt;
2463 return gen_rtx_CONST_VECTOR (outermode, v);
2466 /* ??? This code is partly redundant with code below, but can handle
2467 the subregs of floats and similar corner cases.
2468 Later we should move all simplification code here and rewrite
2469 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2470 using SIMPLIFY_SUBREG. */
2471 if (subreg_lowpart_offset (outermode, innermode) == byte
2472 && GET_CODE (op) != CONST_VECTOR)
2474 rtx new = gen_lowpart_if_possible (outermode, op);
2475 if (new)
2476 return new;
2479 /* The comment above applies here as well.  */
2480 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2481 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2482 && GET_MODE_CLASS (outermode) == MODE_INT)
2484 rtx new = constant_subword (op,
2485 (byte / UNITS_PER_WORD),
2486 innermode);
2487 if (new)
2488 return new;
2491 if (GET_MODE_CLASS (outermode) != MODE_INT
2492 && GET_MODE_CLASS (outermode) != MODE_CC)
2494 enum machine_mode new_mode = int_mode_for_mode (outermode);
2496 if (new_mode != innermode || byte != 0)
2498 op = simplify_subreg (new_mode, op, innermode, byte);
2499 if (! op)
2500 return NULL_RTX;
2501 return simplify_subreg (outermode, op, new_mode, 0);
2505 offset = byte * BITS_PER_UNIT;
2506 switch (GET_CODE (op))
2507 {
2508 case CONST_DOUBLE:
2509 if (GET_MODE (op) != VOIDmode)
2510 break;
2512 /* We can't handle this case yet. */
2513 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2514 return NULL_RTX;
2516 part = offset >= HOST_BITS_PER_WIDE_INT;
2517 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2518 && BYTES_BIG_ENDIAN)
2519 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2520 && WORDS_BIG_ENDIAN))
2521 part = !part;
2522 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2523 offset %= HOST_BITS_PER_WIDE_INT;
2525 /* We've already picked the word we want from a double, so
2526 pretend this is actually an integer. */
2527 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2529 /* FALLTHROUGH */
2530 case CONST_INT:
2531 if (GET_CODE (op) == CONST_INT)
2532 val = INTVAL (op);
2534 /* We don't handle synthesizing of non-integral constants yet. */
2535 if (GET_MODE_CLASS (outermode) != MODE_INT)
2536 return NULL_RTX;
2538 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2540 if (WORDS_BIG_ENDIAN)
2541 offset = (GET_MODE_BITSIZE (innermode)
2542 - GET_MODE_BITSIZE (outermode) - offset);
2543 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2544 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2545 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2546 - 2 * (offset % BITS_PER_WORD));
2549 if (offset >= HOST_BITS_PER_WIDE_INT)
2550 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2551 else
2553 val >>= offset;
2554 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2555 val = trunc_int_for_mode (val, outermode);
2556 return GEN_INT (val);
2558 default:
2559 break;
2560 }
2561 }
2563 /* Changing mode twice with SUBREG => just change it once,
2564 or not at all if changing back to the starting mode.  */
2565 if (GET_CODE (op) == SUBREG)
2567 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2568 int final_offset = byte + SUBREG_BYTE (op);
2569 rtx new;
2571 if (outermode == innermostmode
2572 && byte == 0 && SUBREG_BYTE (op) == 0)
2573 return SUBREG_REG (op);
2575 /* The SUBREG_BYTE represents the offset, as if the value were stored
2576 in memory.  An irritating exception is a paradoxical subreg, where
2577 we define SUBREG_BYTE to be 0; on big-endian machines the value
2578 would otherwise be negative.  For the moment, undo this exception.  */
2579 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2581 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2582 if (WORDS_BIG_ENDIAN)
2583 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2584 if (BYTES_BIG_ENDIAN)
2585 final_offset += difference % UNITS_PER_WORD;
2587 if (SUBREG_BYTE (op) == 0
2588 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2590 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2591 if (WORDS_BIG_ENDIAN)
2592 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2593 if (BYTES_BIG_ENDIAN)
2594 final_offset += difference % UNITS_PER_WORD;
2597 /* See whether resulting subreg will be paradoxical. */
2598 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2600 /* In nonparadoxical subregs we can't handle negative offsets. */
2601 if (final_offset < 0)
2602 return NULL_RTX;
2603 /* Bail out in case resulting subreg would be incorrect. */
2604 if (final_offset % GET_MODE_SIZE (outermode)
2605 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2606 return NULL_RTX;
2608 else
2610 int offset = 0;
2611 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2613 /* In a paradoxical subreg, see if we are still looking at the lower
2614 part.  If so, our SUBREG_BYTE will be 0.  */
2615 if (WORDS_BIG_ENDIAN)
2616 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2617 if (BYTES_BIG_ENDIAN)
2618 offset += difference % UNITS_PER_WORD;
2619 if (offset == final_offset)
2620 final_offset = 0;
2621 else
2622 return NULL_RTX;
2625 /* Recurse for further possible simplifications.  */
2626 new = simplify_subreg (outermode, SUBREG_REG (op),
2627 GET_MODE (SUBREG_REG (op)),
2628 final_offset);
2629 if (new)
2630 return new;
2631 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2634 /* SUBREG of a hard register => just change the register number
2635 and/or mode. If the hard register is not valid in that mode,
2636 suppress this simplification. If the hard register is the stack,
2637 frame, or argument pointer, leave this as a SUBREG. */
2639 if (REG_P (op)
2640 && (! REG_FUNCTION_VALUE_P (op)
2641 || ! rtx_equal_function_value_matters)
2642 && REGNO (op) < FIRST_PSEUDO_REGISTER
2643 #ifdef CANNOT_CHANGE_MODE_CLASS
2644 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2645 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2646 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2647 #endif
2648 && ((reload_completed && !frame_pointer_needed)
2649 || (REGNO (op) != FRAME_POINTER_REGNUM
2650 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2651 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2652 #endif
2653 ))
2654 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2655 && REGNO (op) != ARG_POINTER_REGNUM
2656 #endif
2657 && REGNO (op) != STACK_POINTER_REGNUM)
2658 {
2659 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2660 0);
2662 /* ??? We do allow it if the current REG is not valid for
2663 its mode. This is a kludge to work around how float/complex
2664 arguments are passed on 32-bit SPARC and should be fixed. */
2665 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2666 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2668 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2670 /* Propagate the original regno.  We don't have any way to specify
2671 the offset inside the original regno, so do so only for the lowpart.
2672 The information is used only by alias analysis, which cannot
2673 grok a partial register anyway.  */
2675 if (subreg_lowpart_offset (outermode, innermode) == byte)
2676 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2677 return x;
2681 /* If we have a SUBREG of a register that we are replacing and we are
2682 replacing it with a MEM, make a new MEM and try replacing the
2683 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2684 or if we would be widening it. */
2686 if (GET_CODE (op) == MEM
2687 && ! mode_dependent_address_p (XEXP (op, 0))
2688 /* Allow splitting of volatile memory references in case we don't
2689 have an instruction to move the whole thing.  */
2690 && (! MEM_VOLATILE_P (op)
2691 || ! have_insn_for (SET, innermode))
2692 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2693 return adjust_address_nv (op, outermode, byte);
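/* For example (editorial illustration), on a little-endian target
   (subreg:QI (mem:SI ADDR) 0) becomes (mem:QI ADDR): the narrower
   reference is made directly, with the address adjusted as needed.  */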
2695 /* Handle complex values represented as CONCAT
2696 of real and imaginary part. */
2697 if (GET_CODE (op) == CONCAT)
2699 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2700 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2701 unsigned int final_offset;
2702 rtx res;
2704 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2705 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2706 if (res)
2707 return res;
2708 /* We can at least simplify it by referring directly to the relevant part. */
2709 return gen_rtx_SUBREG (outermode, part, final_offset);
2712 return NULL_RTX;
2713 }
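/* Illustrative sketch only (editorial addition, not called anywhere):
   the MODE_INT branch for CONST_VECTOR above packs several narrow
   elements into one wider integer.  This simplified version assumes the
   result fits in a single HOST_WIDE_INT and that ELTS is already ordered
   from most to least significant element.  */

static HOST_WIDE_INT example_pack_vector_elts
  PARAMS ((const HOST_WIDE_INT *, int, int)) ATTRIBUTE_UNUSED;

static HOST_WIDE_INT
example_pack_vector_elts (elts, n_elts, elt_bits)
     const HOST_WIDE_INT *elts;
     int n_elts, elt_bits;
{
  HOST_WIDE_INT sum = 0;
  int i;

  for (i = 0; i < n_elts; i++)
    /* Shift the accumulated value up and splice in the next element,
       masked to its width so sign bits do not bleed upward.  */
    sum = (sum << elt_bits)
	  | (elts[i] & (((HOST_WIDE_INT) 1 << elt_bits) - 1));
  return sum;
}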
2714 /* Make a SUBREG operation or equivalent if it folds.  */
2716 rtx
2717 simplify_gen_subreg (outermode, op, innermode, byte)
2718 rtx op;
2719 unsigned int byte;
2720 enum machine_mode outermode, innermode;
2721 {
2722 rtx new;
2723 /* Little bit of sanity checking. */
2724 if (innermode == VOIDmode || outermode == VOIDmode
2725 || innermode == BLKmode || outermode == BLKmode)
2726 abort ();
2728 if (GET_MODE (op) != innermode
2729 && GET_MODE (op) != VOIDmode)
2730 abort ();
2732 if (byte % GET_MODE_SIZE (outermode)
2733 || byte >= GET_MODE_SIZE (innermode))
2734 abort ();
2736 if (GET_CODE (op) == QUEUED)
2737 return NULL_RTX;
2739 new = simplify_subreg (outermode, op, innermode, byte);
2740 if (new)
2741 return new;
2743 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2744 return NULL_RTX;
2746 return gen_rtx_SUBREG (outermode, op, byte);
2747 }
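/* Illustrative usage only (editorial addition, hypothetical caller):
   extracting the low SImode word of a DImode value with the routine
   above; subreg_lowpart_offset supplies the endian-correct byte
   offset.  */

static rtx example_lowpart_si PARAMS ((rtx)) ATTRIBUTE_UNUSED;

static rtx
example_lowpart_si (x)
     rtx x;
{
  /* Yields either a simplified rtx, a (subreg:SI ...) of X, or
     NULL_RTX if no sensible subreg can be made.  */
  return simplify_gen_subreg (SImode, x, DImode,
			      subreg_lowpart_offset (SImode, DImode));
}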
2748 /* Simplify X, an rtx expression.
2750 Return the simplified expression or NULL if no simplifications
2751 were possible.
2753 This is the preferred entry point into the simplification routines;
2754 however, we still allow passes to call the more specific routines.
2756 Right now GCC has three (yes, three) major bodies of RTL simplification
2757 code that need to be unified.
2759 1. fold_rtx in cse.c. This code uses various CSE specific
2760 information to aid in RTL simplification.
2762 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2763 it uses combine specific information to aid in RTL
2764 simplification.
2766 3. The routines in this file.
2769 Long term we want to have only one body of simplification code; to
2770 get to that state I recommend the following steps:
2772 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2773 that do not depend on pass-specific state into these routines.
2775 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2776 use this routine whenever possible.
2778 3. Allow for pass-dependent state to be provided to these
2779 routines and add simplifications based on the pass-dependent
2780 state.  Remove code from cse.c & combine.c that becomes
2781 redundant/dead.
2783 It will take time, but ultimately the compiler will be easier to
2784 maintain and improve.  It's totally silly that when we add a
2785 simplification it needs to be added to 4 places (3 for RTL
2786 simplification and 1 for tree simplification).  */
2788 rtx
2789 simplify_rtx (x)
2790 rtx x;
2791 {
2792 enum rtx_code code = GET_CODE (x);
2793 enum machine_mode mode = GET_MODE (x);
2795 switch (GET_RTX_CLASS (code))
2796 {
2797 case '1':
2798 return simplify_unary_operation (code, mode,
2799 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2800 case 'c':
2801 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2803 rtx tem;
2805 tem = XEXP (x, 0);
2806 XEXP (x, 0) = XEXP (x, 1);
2807 XEXP (x, 1) = tem;
2808 return simplify_binary_operation (code, mode,
2809 XEXP (x, 0), XEXP (x, 1));
2812 case '2':
2813 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2815 case '3':
2816 case 'b':
2817 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2818 XEXP (x, 0), XEXP (x, 1),
2819 XEXP (x, 2));
2821 case '<':
2822 return simplify_relational_operation (code,
2823 ((GET_MODE (XEXP (x, 0))
2824 != VOIDmode)
2825 ? GET_MODE (XEXP (x, 0))
2826 : GET_MODE (XEXP (x, 1))),
2827 XEXP (x, 0), XEXP (x, 1));
2828 case 'x':
2829 if (code == SUBREG)
2830 return simplify_gen_subreg (mode, SUBREG_REG (x),
2831 GET_MODE (SUBREG_REG (x)),
2832 SUBREG_BYTE (x));
2833 if (code == CONSTANT_P_RTX)
2835 if (CONSTANT_P (XEXP (x, 0)))
2836 return const1_rtx;
2838 return NULL;
2839 default:
2840 return NULL;
2841 }
2842 }
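/* Illustrative usage only (editorial addition, hypothetical caller):
   folding a constant addition through the generic entry point above.  */

static rtx example_fold_const_plus PARAMS ((void)) ATTRIBUTE_UNUSED;

static rtx
example_fold_const_plus ()
{
  /* (plus:SI (const_int 2) (const_int 3)) should fold to (const_int 5);
     simplify_rtx returns NULL when no simplification applies, so fall
     back to the original expression in that case.  */
  rtx x = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
  rtx tem = simplify_rtx (x);
  return tem ? tem : x;
}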