/* Scraped-page residue (commit subject, repository path, blob id) kept
   as a comment so it cannot be mistaken for code:
     Daily bump.
     [official-gcc.git] / gcc / simplify-rtx.c
     blob 2896041c4fd9f3ddaf210e529607f08ca9001f96  */
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "toplev.h"
37 #include "output.h"
38 #include "ggc.h"
/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.  */

#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   /* Test the base of the PLUS; the former `(X) =='	\
	      form could never be true here since X is known	\
	      to be a PLUS at this point.  */			\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   /* Test the base of the PLUS; the former `(X) =='	\
	      form was dead code since X is a PLUS here.  */	\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  LOW is parenthesized so that an expression
   argument (e.g. `a - b') binds to the cast as a whole, not just to
   its first operand.  */
#define HWI_SIGN_EXTEND(low) \
 (((HOST_WIDE_INT) (low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
98 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
99 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
100 const void *));
101 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
102 enum machine_mode, rtx,
103 rtx, int));
104 static void check_fold_consts PARAMS ((PTR));
105 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
106 static void simplify_unary_real PARAMS ((PTR));
107 static void simplify_binary_real PARAMS ((PTR));
108 #endif
109 static void simplify_binary_is2orm1 PARAMS ((PTR));
112 /* Negate a CONST_INT rtx, truncating (because a conversion from a
113 maximally negative number can overflow). */
114 static rtx
115 neg_const_int (mode, i)
116 enum machine_mode mode;
117 rtx i;
119 return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode));
123 /* Make a binary operation by properly ordering the operands and
124 seeing if the expression folds. */
127 simplify_gen_binary (code, mode, op0, op1)
128 enum rtx_code code;
129 enum machine_mode mode;
130 rtx op0, op1;
132 rtx tem;
134 /* Put complex operands first and constants second if commutative. */
135 if (GET_RTX_CLASS (code) == 'c'
136 && swap_commutative_operands_p (op0, op1))
137 tem = op0, op0 = op1, op1 = tem;
139 /* If this simplifies, do it. */
140 tem = simplify_binary_operation (code, mode, op0, op1);
141 if (tem)
142 return tem;
144 /* Handle addition and subtraction specially. Otherwise, just form
145 the operation. */
147 if (code == PLUS || code == MINUS)
149 tem = simplify_plus_minus (code, mode, op0, op1, 1);
150 if (tem)
151 return tem;
154 return gen_rtx_fmt_ee (code, mode, op0, op1);
157 /* If X is a MEM referencing the constant pool, return the real value.
158 Otherwise return X. */
160 avoid_constant_pool_reference (x)
161 rtx x;
163 rtx c, addr;
164 enum machine_mode cmode;
166 if (GET_CODE (x) != MEM)
167 return x;
168 addr = XEXP (x, 0);
170 if (GET_CODE (addr) != SYMBOL_REF
171 || ! CONSTANT_POOL_ADDRESS_P (addr))
172 return x;
174 c = get_pool_constant (addr);
175 cmode = get_pool_mode (addr);
177 /* If we're accessing the constant in a different mode than it was
178 originally stored, attempt to fix that up via subreg simplifications.
179 If that fails we have no choice but to return the original memory. */
180 if (cmode != GET_MODE (x))
182 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
183 return c ? c : x;
186 return c;
189 /* Make a unary operation by first seeing if it folds and otherwise making
190 the specified operation. */
193 simplify_gen_unary (code, mode, op, op_mode)
194 enum rtx_code code;
195 enum machine_mode mode;
196 rtx op;
197 enum machine_mode op_mode;
199 rtx tem;
201 /* If this simplifies, use it. */
202 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
203 return tem;
205 return gen_rtx_fmt_e (code, mode, op);
208 /* Likewise for ternary operations. */
211 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
212 enum rtx_code code;
213 enum machine_mode mode, op0_mode;
214 rtx op0, op1, op2;
216 rtx tem;
218 /* If this simplifies, use it. */
219 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
220 op0, op1, op2)))
221 return tem;
223 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
226 /* Likewise, for relational operations.
227 CMP_MODE specifies mode comparison is done in.
231 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
232 enum rtx_code code;
233 enum machine_mode mode;
234 enum machine_mode cmp_mode;
235 rtx op0, op1;
237 rtx tem;
239 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
240 return tem;
242 /* Put complex operands first and constants second. */
243 if (swap_commutative_operands_p (op0, op1))
244 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
246 return gen_rtx_fmt_ee (code, mode, op0, op1);
249 /* Replace all occurrences of OLD in X with NEW and try to simplify the
250 resulting RTX. Return a new RTX which is as simplified as possible. */
253 simplify_replace_rtx (x, old, new)
254 rtx x;
255 rtx old;
256 rtx new;
258 enum rtx_code code = GET_CODE (x);
259 enum machine_mode mode = GET_MODE (x);
261 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
262 to build a new expression substituting recursively. If we can't do
263 anything, return our input. */
265 if (x == old)
266 return new;
268 switch (GET_RTX_CLASS (code))
270 case '1':
272 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
273 rtx op = (XEXP (x, 0) == old
274 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
276 return simplify_gen_unary (code, mode, op, op_mode);
279 case '2':
280 case 'c':
281 return
282 simplify_gen_binary (code, mode,
283 simplify_replace_rtx (XEXP (x, 0), old, new),
284 simplify_replace_rtx (XEXP (x, 1), old, new));
285 case '<':
287 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
288 ? GET_MODE (XEXP (x, 0))
289 : GET_MODE (XEXP (x, 1)));
290 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
291 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
293 return
294 simplify_gen_relational (code, mode,
295 (op_mode != VOIDmode
296 ? op_mode
297 : GET_MODE (op0) != VOIDmode
298 ? GET_MODE (op0)
299 : GET_MODE (op1)),
300 op0, op1);
303 case '3':
304 case 'b':
306 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
307 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
309 return
310 simplify_gen_ternary (code, mode,
311 (op_mode != VOIDmode
312 ? op_mode
313 : GET_MODE (op0)),
314 op0,
315 simplify_replace_rtx (XEXP (x, 1), old, new),
316 simplify_replace_rtx (XEXP (x, 2), old, new));
319 case 'x':
320 /* The only case we try to handle is a SUBREG. */
321 if (code == SUBREG)
323 rtx exp;
324 exp = simplify_gen_subreg (GET_MODE (x),
325 simplify_replace_rtx (SUBREG_REG (x),
326 old, new),
327 GET_MODE (SUBREG_REG (x)),
328 SUBREG_BYTE (x));
329 if (exp)
330 x = exp;
332 return x;
334 default:
335 if (GET_CODE (x) == MEM)
336 return
337 replace_equiv_address_nv (x,
338 simplify_replace_rtx (XEXP (x, 0),
339 old, new));
341 return x;
343 return x;
346 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
347 /* Subroutine of simplify_unary_operation, called via do_float_handler.
348 Handles simplification of unary ops on floating point values. */
349 struct simplify_unary_real_args
351 rtx operand;
352 rtx result;
353 enum machine_mode mode;
354 enum rtx_code code;
355 bool want_integer;
357 #define REAL_VALUE_ABS(d_) \
358 (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))
360 static void
361 simplify_unary_real (p)
362 PTR p;
364 REAL_VALUE_TYPE d;
366 struct simplify_unary_real_args *args =
367 (struct simplify_unary_real_args *) p;
369 REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);
371 if (args->want_integer)
373 HOST_WIDE_INT i;
375 switch (args->code)
377 case FIX: i = REAL_VALUE_FIX (d); break;
378 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
379 default:
380 abort ();
382 args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
384 else
386 switch (args->code)
388 case SQRT:
389 /* We don't attempt to optimize this. */
390 args->result = 0;
391 return;
393 case ABS: d = REAL_VALUE_ABS (d); break;
394 case NEG: d = REAL_VALUE_NEGATE (d); break;
395 case FLOAT_TRUNCATE: d = real_value_truncate (args->mode, d); break;
396 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
397 case FIX: d = REAL_VALUE_RNDZINT (d); break;
398 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
399 default:
400 abort ();
402 args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
405 #endif
407 /* Try to simplify a unary operation CODE whose output mode is to be
408 MODE with input operand OP whose mode was originally OP_MODE.
409 Return zero if no simplification can be made. */
411 simplify_unary_operation (code, mode, op, op_mode)
412 enum rtx_code code;
413 enum machine_mode mode;
414 rtx op;
415 enum machine_mode op_mode;
417 unsigned int width = GET_MODE_BITSIZE (mode);
418 rtx trueop = avoid_constant_pool_reference (op);
420 /* The order of these tests is critical so that, for example, we don't
421 check the wrong mode (input vs. output) for a conversion operation,
422 such as FIX. At some point, this should be simplified. */
424 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
426 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
427 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
429 HOST_WIDE_INT hv, lv;
430 REAL_VALUE_TYPE d;
432 if (GET_CODE (trueop) == CONST_INT)
433 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
434 else
435 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
437 #ifdef REAL_ARITHMETIC
438 REAL_VALUE_FROM_INT (d, lv, hv, mode);
439 #else
440 if (hv < 0)
442 d = (double) (~ hv);
443 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
444 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
445 d += (double) (unsigned HOST_WIDE_INT) (~ lv);
446 d = (- d - 1.0);
448 else
450 d = (double) hv;
451 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
452 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
453 d += (double) (unsigned HOST_WIDE_INT) lv;
455 #endif /* REAL_ARITHMETIC */
456 d = real_value_truncate (mode, d);
457 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
459 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
460 && (GET_CODE (trueop) == CONST_DOUBLE
461 || GET_CODE (trueop) == CONST_INT))
463 HOST_WIDE_INT hv, lv;
464 REAL_VALUE_TYPE d;
466 if (GET_CODE (trueop) == CONST_INT)
467 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
468 else
469 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
471 if (op_mode == VOIDmode)
473 /* We don't know how to interpret negative-looking numbers in
474 this case, so don't try to fold those. */
475 if (hv < 0)
476 return 0;
478 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
480 else
481 hv = 0, lv &= GET_MODE_MASK (op_mode);
483 #ifdef REAL_ARITHMETIC
484 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
485 #else
487 d = (double) (unsigned HOST_WIDE_INT) hv;
488 d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
489 * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
490 d += (double) (unsigned HOST_WIDE_INT) lv;
491 #endif /* REAL_ARITHMETIC */
492 d = real_value_truncate (mode, d);
493 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
495 #endif
497 if (GET_CODE (trueop) == CONST_INT
498 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
500 HOST_WIDE_INT arg0 = INTVAL (trueop);
501 HOST_WIDE_INT val;
503 switch (code)
505 case NOT:
506 val = ~ arg0;
507 break;
509 case NEG:
510 val = - arg0;
511 break;
513 case ABS:
514 val = (arg0 >= 0 ? arg0 : - arg0);
515 break;
517 case FFS:
518 /* Don't use ffs here. Instead, get low order bit and then its
519 number. If arg0 is zero, this will return 0, as desired. */
520 arg0 &= GET_MODE_MASK (mode);
521 val = exact_log2 (arg0 & (- arg0)) + 1;
522 break;
524 case TRUNCATE:
525 val = arg0;
526 break;
528 case ZERO_EXTEND:
529 /* When zero-extending a CONST_INT, we need to know its
530 original mode. */
531 if (op_mode == VOIDmode)
532 abort ();
533 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
535 /* If we were really extending the mode,
536 we would have to distinguish between zero-extension
537 and sign-extension. */
538 if (width != GET_MODE_BITSIZE (op_mode))
539 abort ();
540 val = arg0;
542 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
543 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
544 else
545 return 0;
546 break;
548 case SIGN_EXTEND:
549 if (op_mode == VOIDmode)
550 op_mode = mode;
551 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
553 /* If we were really extending the mode,
554 we would have to distinguish between zero-extension
555 and sign-extension. */
556 if (width != GET_MODE_BITSIZE (op_mode))
557 abort ();
558 val = arg0;
560 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
563 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
564 if (val
565 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
566 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
568 else
569 return 0;
570 break;
572 case SQRT:
573 case FLOAT_EXTEND:
574 case FLOAT_TRUNCATE:
575 case SS_TRUNCATE:
576 case US_TRUNCATE:
577 return 0;
579 default:
580 abort ();
583 val = trunc_int_for_mode (val, mode);
585 return GEN_INT (val);
588 /* We can do some operations on integer CONST_DOUBLEs. Also allow
589 for a DImode operation on a CONST_INT. */
590 else if (GET_MODE (trueop) == VOIDmode
591 && width <= HOST_BITS_PER_WIDE_INT * 2
592 && (GET_CODE (trueop) == CONST_DOUBLE
593 || GET_CODE (trueop) == CONST_INT))
595 unsigned HOST_WIDE_INT l1, lv;
596 HOST_WIDE_INT h1, hv;
598 if (GET_CODE (trueop) == CONST_DOUBLE)
599 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
600 else
601 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
603 switch (code)
605 case NOT:
606 lv = ~ l1;
607 hv = ~ h1;
608 break;
610 case NEG:
611 neg_double (l1, h1, &lv, &hv);
612 break;
614 case ABS:
615 if (h1 < 0)
616 neg_double (l1, h1, &lv, &hv);
617 else
618 lv = l1, hv = h1;
619 break;
621 case FFS:
622 hv = 0;
623 if (l1 == 0)
624 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
625 else
626 lv = exact_log2 (l1 & (-l1)) + 1;
627 break;
629 case TRUNCATE:
630 /* This is just a change-of-mode, so do nothing. */
631 lv = l1, hv = h1;
632 break;
634 case ZERO_EXTEND:
635 if (op_mode == VOIDmode)
636 abort ();
638 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
639 return 0;
641 hv = 0;
642 lv = l1 & GET_MODE_MASK (op_mode);
643 break;
645 case SIGN_EXTEND:
646 if (op_mode == VOIDmode
647 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
648 return 0;
649 else
651 lv = l1 & GET_MODE_MASK (op_mode);
652 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
653 && (lv & ((HOST_WIDE_INT) 1
654 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
655 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
657 hv = HWI_SIGN_EXTEND (lv);
659 break;
661 case SQRT:
662 return 0;
664 default:
665 return 0;
668 return immed_double_const (lv, hv, mode);
671 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
672 else if (GET_CODE (trueop) == CONST_DOUBLE
673 && GET_MODE_CLASS (mode) == MODE_FLOAT)
675 struct simplify_unary_real_args args;
676 args.operand = trueop;
677 args.mode = mode;
678 args.code = code;
679 args.want_integer = false;
681 if (do_float_handler (simplify_unary_real, (PTR) &args))
682 return args.result;
684 return 0;
687 else if (GET_CODE (trueop) == CONST_DOUBLE
688 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
689 && GET_MODE_CLASS (mode) == MODE_INT
690 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
692 struct simplify_unary_real_args args;
693 args.operand = trueop;
694 args.mode = mode;
695 args.code = code;
696 args.want_integer = true;
698 if (do_float_handler (simplify_unary_real, (PTR) &args))
699 return args.result;
701 return 0;
703 #endif
704 /* This was formerly used only for non-IEEE float.
705 eggert@twinsun.com says it is safe for IEEE also. */
706 else
708 enum rtx_code reversed;
709 /* There are some simplifications we can do even if the operands
710 aren't constant. */
711 switch (code)
713 case NOT:
714 /* (not (not X)) == X. */
715 if (GET_CODE (op) == NOT)
716 return XEXP (op, 0);
718 /* (not (eq X Y)) == (ne X Y), etc. */
719 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
720 && ((reversed = reversed_comparison_code (op, NULL_RTX))
721 != UNKNOWN))
722 return gen_rtx_fmt_ee (reversed,
723 op_mode, XEXP (op, 0), XEXP (op, 1));
724 break;
726 case NEG:
727 /* (neg (neg X)) == X. */
728 if (GET_CODE (op) == NEG)
729 return XEXP (op, 0);
730 break;
732 case SIGN_EXTEND:
733 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
734 becomes just the MINUS if its mode is MODE. This allows
735 folding switch statements on machines using casesi (such as
736 the VAX). */
737 if (GET_CODE (op) == TRUNCATE
738 && GET_MODE (XEXP (op, 0)) == mode
739 && GET_CODE (XEXP (op, 0)) == MINUS
740 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
741 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
742 return XEXP (op, 0);
744 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
745 if (! POINTERS_EXTEND_UNSIGNED
746 && mode == Pmode && GET_MODE (op) == ptr_mode
747 && (CONSTANT_P (op)
748 || (GET_CODE (op) == SUBREG
749 && GET_CODE (SUBREG_REG (op)) == REG
750 && REG_POINTER (SUBREG_REG (op))
751 && GET_MODE (SUBREG_REG (op)) == Pmode)))
752 return convert_memory_address (Pmode, op);
753 #endif
754 break;
756 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
757 case ZERO_EXTEND:
758 if (POINTERS_EXTEND_UNSIGNED > 0
759 && mode == Pmode && GET_MODE (op) == ptr_mode
760 && (CONSTANT_P (op)
761 || (GET_CODE (op) == SUBREG
762 && GET_CODE (SUBREG_REG (op)) == REG
763 && REG_POINTER (SUBREG_REG (op))
764 && GET_MODE (SUBREG_REG (op)) == Pmode)))
765 return convert_memory_address (Pmode, op);
766 break;
767 #endif
769 default:
770 break;
773 return 0;
777 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
778 /* Subroutine of simplify_binary_operation, called via do_float_handler.
779 Handles simplification of binary ops on floating point values. */
780 struct simplify_binary_real_args
782 rtx trueop0, trueop1;
783 rtx result;
784 enum rtx_code code;
785 enum machine_mode mode;
788 static void
789 simplify_binary_real (p)
790 PTR p;
792 REAL_VALUE_TYPE f0, f1, value;
793 struct simplify_binary_real_args *args =
794 (struct simplify_binary_real_args *) p;
796 REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
797 REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
798 f0 = real_value_truncate (args->mode, f0);
799 f1 = real_value_truncate (args->mode, f1);
801 #ifdef REAL_ARITHMETIC
802 #ifndef REAL_INFINITY
803 if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
805 args->result = 0;
806 return;
808 #endif
809 REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);
810 #else
811 switch (args->code)
813 case PLUS:
814 value = f0 + f1;
815 break;
816 case MINUS:
817 value = f0 - f1;
818 break;
819 case MULT:
820 value = f0 * f1;
821 break;
822 case DIV:
823 #ifndef REAL_INFINITY
824 if (f1 == 0)
825 return 0;
826 #endif
827 value = f0 / f1;
828 break;
829 case SMIN:
830 value = MIN (f0, f1);
831 break;
832 case SMAX:
833 value = MAX (f0, f1);
834 break;
835 default:
836 abort ();
838 #endif
840 value = real_value_truncate (args->mode, value);
841 args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
843 #endif
845 /* Another subroutine called via do_float_handler. This one tests
846 the floating point value given against 2. and -1. */
847 struct simplify_binary_is2orm1_args
849 rtx value;
850 bool is_2;
851 bool is_m1;
854 static void
855 simplify_binary_is2orm1 (p)
856 PTR p;
858 REAL_VALUE_TYPE d;
859 struct simplify_binary_is2orm1_args *args =
860 (struct simplify_binary_is2orm1_args *) p;
862 REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
863 args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
864 args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
867 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
868 and OP1. Return 0 if no simplification is possible.
870 Don't use this for relational operations such as EQ or LT.
871 Use simplify_relational_operation instead. */
873 simplify_binary_operation (code, mode, op0, op1)
874 enum rtx_code code;
875 enum machine_mode mode;
876 rtx op0, op1;
878 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
879 HOST_WIDE_INT val;
880 unsigned int width = GET_MODE_BITSIZE (mode);
881 rtx tem;
882 rtx trueop0 = avoid_constant_pool_reference (op0);
883 rtx trueop1 = avoid_constant_pool_reference (op1);
885 /* Relational operations don't work here. We must know the mode
886 of the operands in order to do the comparison correctly.
887 Assuming a full word can give incorrect results.
888 Consider comparing 128 with -128 in QImode. */
890 if (GET_RTX_CLASS (code) == '<')
891 abort ();
893 /* Make sure the constant is second. */
894 if (GET_RTX_CLASS (code) == 'c'
895 && swap_commutative_operands_p (trueop0, trueop1))
897 tem = op0, op0 = op1, op1 = tem;
898 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
901 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
902 if (GET_MODE_CLASS (mode) == MODE_FLOAT
903 && GET_CODE (trueop0) == CONST_DOUBLE
904 && GET_CODE (trueop1) == CONST_DOUBLE
905 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
907 struct simplify_binary_real_args args;
908 args.trueop0 = trueop0;
909 args.trueop1 = trueop1;
910 args.mode = mode;
911 args.code = code;
913 if (do_float_handler (simplify_binary_real, (PTR) &args))
914 return args.result;
915 return 0;
917 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
919 /* We can fold some multi-word operations. */
920 if (GET_MODE_CLASS (mode) == MODE_INT
921 && width == HOST_BITS_PER_WIDE_INT * 2
922 && (GET_CODE (trueop0) == CONST_DOUBLE
923 || GET_CODE (trueop0) == CONST_INT)
924 && (GET_CODE (trueop1) == CONST_DOUBLE
925 || GET_CODE (trueop1) == CONST_INT))
927 unsigned HOST_WIDE_INT l1, l2, lv;
928 HOST_WIDE_INT h1, h2, hv;
930 if (GET_CODE (trueop0) == CONST_DOUBLE)
931 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
932 else
933 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
935 if (GET_CODE (trueop1) == CONST_DOUBLE)
936 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
937 else
938 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
940 switch (code)
942 case MINUS:
943 /* A - B == A + (-B). */
944 neg_double (l2, h2, &lv, &hv);
945 l2 = lv, h2 = hv;
947 /* .. fall through ... */
949 case PLUS:
950 add_double (l1, h1, l2, h2, &lv, &hv);
951 break;
953 case MULT:
954 mul_double (l1, h1, l2, h2, &lv, &hv);
955 break;
957 case DIV: case MOD: case UDIV: case UMOD:
958 /* We'd need to include tree.h to do this and it doesn't seem worth
959 it. */
960 return 0;
962 case AND:
963 lv = l1 & l2, hv = h1 & h2;
964 break;
966 case IOR:
967 lv = l1 | l2, hv = h1 | h2;
968 break;
970 case XOR:
971 lv = l1 ^ l2, hv = h1 ^ h2;
972 break;
974 case SMIN:
975 if (h1 < h2
976 || (h1 == h2
977 && ((unsigned HOST_WIDE_INT) l1
978 < (unsigned HOST_WIDE_INT) l2)))
979 lv = l1, hv = h1;
980 else
981 lv = l2, hv = h2;
982 break;
984 case SMAX:
985 if (h1 > h2
986 || (h1 == h2
987 && ((unsigned HOST_WIDE_INT) l1
988 > (unsigned HOST_WIDE_INT) l2)))
989 lv = l1, hv = h1;
990 else
991 lv = l2, hv = h2;
992 break;
994 case UMIN:
995 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
996 || (h1 == h2
997 && ((unsigned HOST_WIDE_INT) l1
998 < (unsigned HOST_WIDE_INT) l2)))
999 lv = l1, hv = h1;
1000 else
1001 lv = l2, hv = h2;
1002 break;
1004 case UMAX:
1005 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1006 || (h1 == h2
1007 && ((unsigned HOST_WIDE_INT) l1
1008 > (unsigned HOST_WIDE_INT) l2)))
1009 lv = l1, hv = h1;
1010 else
1011 lv = l2, hv = h2;
1012 break;
1014 case LSHIFTRT: case ASHIFTRT:
1015 case ASHIFT:
1016 case ROTATE: case ROTATERT:
1017 #ifdef SHIFT_COUNT_TRUNCATED
1018 if (SHIFT_COUNT_TRUNCATED)
1019 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1020 #endif
1022 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1023 return 0;
1025 if (code == LSHIFTRT || code == ASHIFTRT)
1026 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1027 code == ASHIFTRT);
1028 else if (code == ASHIFT)
1029 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1030 else if (code == ROTATE)
1031 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1032 else /* code == ROTATERT */
1033 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1034 break;
1036 default:
1037 return 0;
1040 return immed_double_const (lv, hv, mode);
1043 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1044 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1046 /* Even if we can't compute a constant result,
1047 there are some cases worth simplifying. */
1049 switch (code)
1051 case PLUS:
1052 /* In IEEE floating point, x+0 is not the same as x. Similarly
1053 for the other optimizations below. */
1054 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1055 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1056 break;
1058 if (trueop1 == CONST0_RTX (mode))
1059 return op0;
1061 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
1062 if (GET_CODE (op0) == NEG)
1063 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1064 else if (GET_CODE (op1) == NEG)
1065 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1067 /* (~a) + 1 -> -a */
1068 if (INTEGRAL_MODE_P (mode)
1069 && GET_CODE (op0) == NOT
1070 && trueop1 == const1_rtx)
1071 return gen_rtx_NEG (mode, XEXP (op0, 0));
1073 /* Handle both-operands-constant cases. We can only add
1074 CONST_INTs to constants since the sum of relocatable symbols
1075 can't be handled by most assemblers. Don't add CONST_INT
1076 to CONST_INT since overflow won't be computed properly if wider
1077 than HOST_BITS_PER_WIDE_INT. */
1079 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1080 && GET_CODE (op1) == CONST_INT)
1081 return plus_constant (op0, INTVAL (op1));
1082 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1083 && GET_CODE (op0) == CONST_INT)
1084 return plus_constant (op1, INTVAL (op0));
1086 /* See if this is something like X * C - X or vice versa or
1087 if the multiplication is written as a shift. If so, we can
1088 distribute and make a new multiply, shift, or maybe just
1089 have X (if C is 2 in the example above). But don't make
1090 real multiply if we didn't have one before. */
1092 if (! FLOAT_MODE_P (mode))
1094 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1095 rtx lhs = op0, rhs = op1;
1096 int had_mult = 0;
1098 if (GET_CODE (lhs) == NEG)
1099 coeff0 = -1, lhs = XEXP (lhs, 0);
1100 else if (GET_CODE (lhs) == MULT
1101 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1103 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1104 had_mult = 1;
1106 else if (GET_CODE (lhs) == ASHIFT
1107 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1108 && INTVAL (XEXP (lhs, 1)) >= 0
1109 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1111 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1112 lhs = XEXP (lhs, 0);
1115 if (GET_CODE (rhs) == NEG)
1116 coeff1 = -1, rhs = XEXP (rhs, 0);
1117 else if (GET_CODE (rhs) == MULT
1118 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1120 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1121 had_mult = 1;
1123 else if (GET_CODE (rhs) == ASHIFT
1124 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1125 && INTVAL (XEXP (rhs, 1)) >= 0
1126 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1128 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1129 rhs = XEXP (rhs, 0);
1132 if (rtx_equal_p (lhs, rhs))
1134 tem = simplify_gen_binary (MULT, mode, lhs,
1135 GEN_INT (coeff0 + coeff1));
1136 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1140 /* If one of the operands is a PLUS or a MINUS, see if we can
1141 simplify this by the associative law.
1142 Don't use the associative law for floating point.
1143 The inaccuracy makes it nonassociative,
1144 and subtle programs can break if operations are associated. */
1146 if (INTEGRAL_MODE_P (mode)
1147 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1148 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1149 || (GET_CODE (op0) == CONST
1150 && GET_CODE (XEXP (op0, 0)) == PLUS)
1151 || (GET_CODE (op1) == CONST
1152 && GET_CODE (XEXP (op1, 0)) == PLUS))
1153 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1154 return tem;
1155 break;
1157 case COMPARE:
1158 #ifdef HAVE_cc0
1159 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1160 using cc0, in which case we want to leave it as a COMPARE
1161 so we can distinguish it from a register-register-copy.
1163 In IEEE floating point, x-0 is not the same as x. */
1165 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1166 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1167 && trueop1 == CONST0_RTX (mode))
1168 return op0;
1169 #endif
1171 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1172 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1173 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1174 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1176 rtx xop00 = XEXP (op0, 0);
1177 rtx xop10 = XEXP (op1, 0);
1179 #ifdef HAVE_cc0
1180 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1181 #else
1182 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1183 && GET_MODE (xop00) == GET_MODE (xop10)
1184 && REGNO (xop00) == REGNO (xop10)
1185 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1186 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1187 #endif
1188 return xop00;
1190 break;
1192 case MINUS:
1193 /* None of these optimizations can be done for IEEE
1194 floating point. */
1195 if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
1196 && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
1197 break;
1199 /* We can't assume x-x is 0 even with non-IEEE floating point,
1200 but since it is zero except in very strange circumstances, we
1201 will treat it as zero with -funsafe-math-optimizations. */
1202 if (rtx_equal_p (trueop0, trueop1)
1203 && ! side_effects_p (op0)
1204 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1205 return CONST0_RTX (mode);
1207 /* Change subtraction from zero into negation. */
1208 if (trueop0 == CONST0_RTX (mode))
1209 return gen_rtx_NEG (mode, op1);
1211 /* (-1 - a) is ~a. */
1212 if (trueop0 == constm1_rtx)
1213 return gen_rtx_NOT (mode, op1);
1215 /* Subtracting 0 has no effect. */
1216 if (trueop1 == CONST0_RTX (mode))
1217 return op0;
1219 /* See if this is something like X * C - X or vice versa or
1220 if the multiplication is written as a shift. If so, we can
1221 distribute and make a new multiply, shift, or maybe just
1222 have X (if C is 2 in the example above). But don't make
1223 real multiply if we didn't have one before. */
1225 if (! FLOAT_MODE_P (mode))
1227 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1228 rtx lhs = op0, rhs = op1;
1229 int had_mult = 0;
1231 if (GET_CODE (lhs) == NEG)
1232 coeff0 = -1, lhs = XEXP (lhs, 0);
1233 else if (GET_CODE (lhs) == MULT
1234 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1236 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1237 had_mult = 1;
1239 else if (GET_CODE (lhs) == ASHIFT
1240 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1241 && INTVAL (XEXP (lhs, 1)) >= 0
1242 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1244 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1245 lhs = XEXP (lhs, 0);
1248 if (GET_CODE (rhs) == NEG)
1249 coeff1 = - 1, rhs = XEXP (rhs, 0);
1250 else if (GET_CODE (rhs) == MULT
1251 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1253 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1254 had_mult = 1;
1256 else if (GET_CODE (rhs) == ASHIFT
1257 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1258 && INTVAL (XEXP (rhs, 1)) >= 0
1259 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1261 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1262 rhs = XEXP (rhs, 0);
1265 if (rtx_equal_p (lhs, rhs))
1267 tem = simplify_gen_binary (MULT, mode, lhs,
1268 GEN_INT (coeff0 - coeff1));
1269 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1273 /* (a - (-b)) -> (a + b). */
1274 if (GET_CODE (op1) == NEG)
1275 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1277 /* If one of the operands is a PLUS or a MINUS, see if we can
1278 simplify this by the associative law.
1279 Don't use the associative law for floating point.
1280 The inaccuracy makes it nonassociative,
1281 and subtle programs can break if operations are associated. */
1283 if (INTEGRAL_MODE_P (mode)
1284 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1285 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1286 || (GET_CODE (op0) == CONST
1287 && GET_CODE (XEXP (op0, 0)) == PLUS)
1288 || (GET_CODE (op1) == CONST
1289 && GET_CODE (XEXP (op1, 0)) == PLUS))
1290 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1291 return tem;
1293 /* Don't let a relocatable value get a negative coeff. */
1294 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1295 return simplify_gen_binary (PLUS, mode,
1296 op0,
1297 neg_const_int (mode, op1));
1299 /* (x - (x & y)) -> (x & ~y) */
1300 if (GET_CODE (op1) == AND)
1302 if (rtx_equal_p (op0, XEXP (op1, 0)))
1303 return simplify_gen_binary (AND, mode, op0,
1304 gen_rtx_NOT (mode, XEXP (op1, 1)));
1305 if (rtx_equal_p (op0, XEXP (op1, 1)))
1306 return simplify_gen_binary (AND, mode, op0,
1307 gen_rtx_NOT (mode, XEXP (op1, 0)));
1309 break;
1311 case MULT:
1312 if (trueop1 == constm1_rtx)
1314 tem = simplify_unary_operation (NEG, mode, op0, mode);
1316 return tem ? tem : gen_rtx_NEG (mode, op0);
1319 /* In IEEE floating point, x*0 is not always 0. */
1320 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1321 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1322 && trueop1 == CONST0_RTX (mode)
1323 && ! side_effects_p (op0))
1324 return op1;
1326 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1327 However, ANSI says we can drop signals,
1328 so we can do this anyway. */
1329 if (trueop1 == CONST1_RTX (mode))
1330 return op0;
1332 /* Convert multiply by constant power of two into shift unless
1333 we are still generating RTL. This test is a kludge. */
1334 if (GET_CODE (trueop1) == CONST_INT
1335 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1336 /* If the mode is larger than the host word size, and the
1337 uppermost bit is set, then this isn't a power of two due
1338 to implicit sign extension. */
1339 && (width <= HOST_BITS_PER_WIDE_INT
1340 || val != HOST_BITS_PER_WIDE_INT - 1)
1341 && ! rtx_equal_function_value_matters)
1342 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1344 if (GET_CODE (trueop1) == CONST_DOUBLE
1345 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
1347 struct simplify_binary_is2orm1_args args;
1349 args.value = trueop1;
1350 if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
1351 return 0;
1353 /* x*2 is x+x and x*(-1) is -x */
1354 if (args.is_2 && GET_MODE (op0) == mode)
1355 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1357 else if (args.is_m1 && GET_MODE (op0) == mode)
1358 return gen_rtx_NEG (mode, op0);
1360 break;
1362 case IOR:
1363 if (trueop1 == const0_rtx)
1364 return op0;
1365 if (GET_CODE (trueop1) == CONST_INT
1366 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1367 == GET_MODE_MASK (mode)))
1368 return op1;
1369 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1370 return op0;
1371 /* A | (~A) -> -1 */
1372 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1373 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1374 && ! side_effects_p (op0)
1375 && GET_MODE_CLASS (mode) != MODE_CC)
1376 return constm1_rtx;
1377 break;
1379 case XOR:
1380 if (trueop1 == const0_rtx)
1381 return op0;
1382 if (GET_CODE (trueop1) == CONST_INT
1383 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1384 == GET_MODE_MASK (mode)))
1385 return gen_rtx_NOT (mode, op0);
1386 if (trueop0 == trueop1 && ! side_effects_p (op0)
1387 && GET_MODE_CLASS (mode) != MODE_CC)
1388 return const0_rtx;
1389 break;
1391 case AND:
1392 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1393 return const0_rtx;
1394 if (GET_CODE (trueop1) == CONST_INT
1395 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1396 == GET_MODE_MASK (mode)))
1397 return op0;
1398 if (trueop0 == trueop1 && ! side_effects_p (op0)
1399 && GET_MODE_CLASS (mode) != MODE_CC)
1400 return op0;
1401 /* A & (~A) -> 0 */
1402 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1403 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1404 && ! side_effects_p (op0)
1405 && GET_MODE_CLASS (mode) != MODE_CC)
1406 return const0_rtx;
1407 break;
1409 case UDIV:
1410 /* Convert divide by power of two into shift (divide by 1 handled
1411 below). */
1412 if (GET_CODE (trueop1) == CONST_INT
1413 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1414 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1416 /* ... fall through ... */
1418 case DIV:
1419 if (trueop1 == CONST1_RTX (mode))
1421 /* On some platforms DIV uses narrower mode than its
1422 operands. */
1423 rtx x = gen_lowpart_common (mode, op0);
1424 if (x)
1425 return x;
1426 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1427 return gen_lowpart_SUBREG (mode, op0);
1428 else
1429 return op0;
1432 /* In IEEE floating point, 0/x is not always 0. */
1433 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1434 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1435 && trueop0 == CONST0_RTX (mode)
1436 && ! side_effects_p (op1))
1437 return op0;
1439 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1440 /* Change division by a constant into multiplication. Only do
1441 this with -funsafe-math-optimizations. */
1442 else if (GET_CODE (trueop1) == CONST_DOUBLE
1443 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1444 && trueop1 != CONST0_RTX (mode)
1445 && flag_unsafe_math_optimizations)
1447 REAL_VALUE_TYPE d;
1448 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1450 if (! REAL_VALUES_EQUAL (d, dconst0))
1452 #if defined (REAL_ARITHMETIC)
1453 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1454 return gen_rtx_MULT (mode, op0,
1455 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1456 #else
1457 return
1458 gen_rtx_MULT (mode, op0,
1459 CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
1460 #endif
1463 #endif
1464 break;
1466 case UMOD:
1467 /* Handle modulus by power of two (mod with 1 handled below). */
1468 if (GET_CODE (trueop1) == CONST_INT
1469 && exact_log2 (INTVAL (trueop1)) > 0)
1470 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1472 /* ... fall through ... */
1474 case MOD:
1475 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1476 && ! side_effects_p (op0) && ! side_effects_p (op1))
1477 return const0_rtx;
1478 break;
1480 case ROTATERT:
1481 case ROTATE:
1482 /* Rotating ~0 always results in ~0. */
1483 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1484 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1485 && ! side_effects_p (op1))
1486 return op0;
1488 /* ... fall through ... */
1490 case ASHIFT:
1491 case ASHIFTRT:
1492 case LSHIFTRT:
1493 if (trueop1 == const0_rtx)
1494 return op0;
1495 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1496 return op0;
1497 break;
1499 case SMIN:
1500 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1501 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1502 && ! side_effects_p (op0))
1503 return op1;
1504 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1505 return op0;
1506 break;
1508 case SMAX:
1509 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1510 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1511 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1512 && ! side_effects_p (op0))
1513 return op1;
1514 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1515 return op0;
1516 break;
1518 case UMIN:
1519 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1520 return op1;
1521 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1522 return op0;
1523 break;
1525 case UMAX:
1526 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1527 return op1;
1528 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1529 return op0;
1530 break;
1532 case SS_PLUS:
1533 case US_PLUS:
1534 case SS_MINUS:
1535 case US_MINUS:
1536 /* ??? There are simplifications that can be done. */
1537 return 0;
1539 default:
1540 abort ();
1543 return 0;
1546 /* Get the integer argument values in two forms:
1547 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1549 arg0 = INTVAL (trueop0);
1550 arg1 = INTVAL (trueop1);
1552 if (width < HOST_BITS_PER_WIDE_INT)
1554 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1555 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1557 arg0s = arg0;
1558 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1559 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1561 arg1s = arg1;
1562 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1563 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1565 else
1567 arg0s = arg0;
1568 arg1s = arg1;
1571 /* Compute the value of the arithmetic. */
1573 switch (code)
1575 case PLUS:
1576 val = arg0s + arg1s;
1577 break;
1579 case MINUS:
1580 val = arg0s - arg1s;
1581 break;
1583 case MULT:
1584 val = arg0s * arg1s;
1585 break;
1587 case DIV:
1588 if (arg1s == 0
1589 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1590 && arg1s == -1))
1591 return 0;
1592 val = arg0s / arg1s;
1593 break;
1595 case MOD:
1596 if (arg1s == 0
1597 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1598 && arg1s == -1))
1599 return 0;
1600 val = arg0s % arg1s;
1601 break;
1603 case UDIV:
1604 if (arg1 == 0
1605 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1606 && arg1s == -1))
1607 return 0;
1608 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1609 break;
1611 case UMOD:
1612 if (arg1 == 0
1613 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1614 && arg1s == -1))
1615 return 0;
1616 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1617 break;
1619 case AND:
1620 val = arg0 & arg1;
1621 break;
1623 case IOR:
1624 val = arg0 | arg1;
1625 break;
1627 case XOR:
1628 val = arg0 ^ arg1;
1629 break;
1631 case LSHIFTRT:
1632 /* If shift count is undefined, don't fold it; let the machine do
1633 what it wants. But truncate it if the machine will do that. */
1634 if (arg1 < 0)
1635 return 0;
1637 #ifdef SHIFT_COUNT_TRUNCATED
1638 if (SHIFT_COUNT_TRUNCATED)
1639 arg1 %= width;
1640 #endif
1642 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1643 break;
1645 case ASHIFT:
1646 if (arg1 < 0)
1647 return 0;
1649 #ifdef SHIFT_COUNT_TRUNCATED
1650 if (SHIFT_COUNT_TRUNCATED)
1651 arg1 %= width;
1652 #endif
1654 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1655 break;
1657 case ASHIFTRT:
1658 if (arg1 < 0)
1659 return 0;
1661 #ifdef SHIFT_COUNT_TRUNCATED
1662 if (SHIFT_COUNT_TRUNCATED)
1663 arg1 %= width;
1664 #endif
1666 val = arg0s >> arg1;
1668 /* Bootstrap compiler may not have sign extended the right shift.
1669 Manually extend the sign to insure bootstrap cc matches gcc. */
1670 if (arg0s < 0 && arg1 > 0)
1671 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1673 break;
1675 case ROTATERT:
1676 if (arg1 < 0)
1677 return 0;
1679 arg1 %= width;
1680 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1681 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1682 break;
1684 case ROTATE:
1685 if (arg1 < 0)
1686 return 0;
1688 arg1 %= width;
1689 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1690 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1691 break;
1693 case COMPARE:
1694 /* Do nothing here. */
1695 return 0;
1697 case SMIN:
1698 val = arg0s <= arg1s ? arg0s : arg1s;
1699 break;
1701 case UMIN:
1702 val = ((unsigned HOST_WIDE_INT) arg0
1703 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1704 break;
1706 case SMAX:
1707 val = arg0s > arg1s ? arg0s : arg1s;
1708 break;
1710 case UMAX:
1711 val = ((unsigned HOST_WIDE_INT) arg0
1712 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1713 break;
1715 default:
1716 abort ();
1719 val = trunc_int_for_mode (val, mode);
1721 return GEN_INT (val);
1724 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1725 PLUS or MINUS.
1727 Rather than test for specific cases, we do this by a brute-force method
1728 and do all possible simplifications until no more changes occur. Then
1729 we rebuild the operation.
1731 If FORCE is true, then always generate the rtx. This is used to
1732 canonicalize stuff emitted from simplify_gen_binary. Note that this
1733 can still fail if the rtx is too complex. It won't fail just because
1734 the result is not 'simpler' than the input, however. */
/* One addend collected by simplify_plus_minus: the term itself plus a
   flag recording whether it enters the overall sum negated.  The layout
   must stay as-is; simplify_plus_minus qsorts arrays of this struct.  */
1736 struct simplify_plus_minus_op_data
1738 rtx op; /* The term (any rtx expression).  */
1739 int neg; /* Nonzero if this term is subtracted in the sum.  */
1742 static int
1743 simplify_plus_minus_op_data_cmp (p1, p2)
1744 const void *p1;
1745 const void *p2;
1747 const struct simplify_plus_minus_op_data *d1 = p1;
1748 const struct simplify_plus_minus_op_data *d2 = p2;
1750 return (commutative_operand_precedence (d2->op)
1751 - commutative_operand_precedence (d1->op));
1754 static rtx
1755 simplify_plus_minus (code, mode, op0, op1, force)
1756 enum rtx_code code;
1757 enum machine_mode mode;
1758 rtx op0, op1;
1759 int force;
1761 struct simplify_plus_minus_op_data ops[8];
1762 rtx result, tem;
1763 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1764 int first, negate, changed;
1765 int i, j;
1767 memset ((char *) ops, 0, sizeof ops);
1769 /* Set up the two operands and then expand them until nothing has been
1770 changed. If we run out of room in our array, give up; this should
1771 almost never happen. */
1773 ops[0].op = op0;
1774 ops[0].neg = 0;
1775 ops[1].op = op1;
1776 ops[1].neg = (code == MINUS); /* A - B is treated as A + (-B).  */
/* Phase 1 (do-while): flatten nested PLUS, MINUS, NEG, NOT and CONST
   sub-expressions into the flat ops[] array, tracking each term's sign
   in ops[].neg.  Repeat full scans until one makes no change.  ops[]
   holds at most 8 entries; rather than overflow it, bail out.  */
1780 changed = 0;
1782 for (i = 0; i < n_ops; i++)
1784 rtx this_op = ops[i].op;
1785 int this_neg = ops[i].neg;
1786 enum rtx_code this_code = GET_CODE (this_op);
1788 switch (this_code)
1790 case PLUS:
1791 case MINUS:
1792 if (n_ops == 7)
1793 return NULL_RTX;
1795 ops[n_ops].op = XEXP (this_op, 1);
1796 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1797 n_ops++;
1799 ops[i].op = XEXP (this_op, 0);
1800 input_ops++;
1801 changed = 1;
1802 break;
1804 case NEG:
/* (neg x) folds into the sign flag of the existing slot.  */
1805 ops[i].op = XEXP (this_op, 0);
1806 ops[i].neg = ! this_neg;
1807 changed = 1;
1808 break;
1810 case CONST:
/* Unwrap (const (plus X Y)) into two constant terms.  */
1811 if (n_ops < 7
1812 && GET_CODE (XEXP (this_op, 0)) == PLUS
1813 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1814 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1816 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1817 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1818 ops[n_ops].neg = this_neg;
1819 n_ops++;
1820 input_consts++;
1821 changed = 1;
1823 break;
1825 case NOT:
1826 /* ~a -> (-a - 1) */
1827 if (n_ops != 7)
1829 ops[n_ops].op = constm1_rtx;
1830 ops[n_ops++].neg = this_neg;
1831 ops[i].op = XEXP (this_op, 0);
1832 ops[i].neg = !this_neg;
1833 changed = 1;
1835 break;
1837 case CONST_INT:
/* Normalize negated integer constants so their sign flag is clear.  */
1838 if (this_neg)
1840 ops[i].op = neg_const_int (mode, this_op);
1841 ops[i].neg = 0;
1842 changed = 1;
1844 break;
1846 default:
1847 break;
1851 while (changed);
1853 /* If we only have two operands, we can't do anything. */
1854 if (n_ops <= 2 && !force)
1855 return NULL_RTX;
1857 /* Count the number of CONSTs we didn't split above. */
1858 for (i = 0; i < n_ops; i++)
1859 if (GET_CODE (ops[i].op) == CONST)
1860 input_consts++;
1862 /* Now simplify each pair of operands until nothing changes. The first
1863 time through just simplify constants against each other. */
/* Phase 2 (do-while): pairwise combination.  Pass 1 (first != 0) only
   folds constants together; later passes try every surviving pair.  */
1865 first = 1;
1868 changed = first;
1870 for (i = 0; i < n_ops - 1; i++)
1871 for (j = i + 1; j < n_ops; j++)
1873 rtx lhs = ops[i].op, rhs = ops[j].op;
1874 int lneg = ops[i].neg, rneg = ops[j].neg;
1876 if (lhs != 0 && rhs != 0
1877 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1879 enum rtx_code ncode = PLUS;
1881 if (lneg != rneg)
1883 ncode = MINUS;
1884 if (lneg)
1885 tem = lhs, lhs = rhs, rhs = tem;
1887 else if (swap_commutative_operands_p (lhs, rhs))
1888 tem = lhs, lhs = rhs, rhs = tem;
1890 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1892 /* Reject "simplifications" that just wrap the two
1893 arguments in a CONST. Failure to do so can result
1894 in infinite recursion with simplify_binary_operation
1895 when it calls us to simplify CONST operations. */
1896 if (tem
1897 && ! (GET_CODE (tem) == CONST
1898 && GET_CODE (XEXP (tem, 0)) == ncode
1899 && XEXP (XEXP (tem, 0), 0) == lhs
1900 && XEXP (XEXP (tem, 0), 1) == rhs)
1901 /* Don't allow -x + -1 -> ~x simplifications in the
1902 first pass. This allows us the chance to combine
1903 the -1 with other constants. */
1904 && ! (first
1905 && GET_CODE (tem) == NOT
1906 && XEXP (tem, 0) == rhs))
/* Accepted: store the combined term in slot i, free slot j.  */
1908 lneg &= rneg;
1909 if (GET_CODE (tem) == NEG)
1910 tem = XEXP (tem, 0), lneg = !lneg;
1911 if (GET_CODE (tem) == CONST_INT && lneg)
1912 tem = neg_const_int (mode, tem), lneg = 0;
1914 ops[i].op = tem;
1915 ops[i].neg = lneg;
1916 ops[j].op = NULL_RTX;
1917 changed = 1;
1922 first = 0;
1924 while (changed);
1926 /* Pack all the operands to the lower-numbered entries. */
1927 for (i = 0, j = 0; j < n_ops; j++)
1928 if (ops[j].op)
1929 ops[i++] = ops[j];
1930 n_ops = i;
1932 /* Sort the operations based on swap_commutative_operands_p. */
1933 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1935 /* We suppressed creation of trivial CONST expressions in the
1936 combination loop to avoid recursion. Create one manually now.
1937 The combination loop should have ensured that there is exactly
1938 one CONST_INT, and the sort will have ensured that it is last
1939 in the array and that any other constant will be next-to-last. */
1941 if (n_ops > 1
1942 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1943 && CONSTANT_P (ops[n_ops - 2].op))
1945 rtx value = ops[n_ops - 1].op;
1946 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1947 value = neg_const_int (mode, value);
1948 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1949 n_ops--;
1952 /* Count the number of CONSTs that we generated. */
1953 n_consts = 0;
1954 for (i = 0; i < n_ops; i++)
1955 if (GET_CODE (ops[i].op) == CONST)
1956 n_consts++;
1958 /* Give up if we didn't reduce the number of operands we had. Make
1959 sure we count a CONST as two operands. If we have the same
1960 number of operands, but have made more CONSTs than before, this
1961 is also an improvement, so accept it. */
1962 if (!force
1963 && (n_ops + n_consts > input_ops
1964 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1965 return NULL_RTX;
1967 /* Put a non-negated operand first. If there aren't any, make all
1968 operands positive and negate the whole thing later. */
1970 negate = 0;
1971 for (i = 0; i < n_ops && ops[i].neg; i++)
1972 continue;
1973 if (i == n_ops)
1975 for (i = 0; i < n_ops; i++)
1976 ops[i].neg = 0;
1977 negate = 1;
1979 else if (i != 0)
1981 tem = ops[0].op;
1982 ops[0] = ops[i];
1983 ops[i].op = tem;
1984 ops[i].neg = 1;
1987 /* Now make the result by performing the requested operations. */
1988 result = ops[0].op;
1989 for (i = 1; i < n_ops; i++)
1990 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1991 mode, result, ops[i].op);
1993 return negate ? gen_rtx_NEG (mode, result) : result;
/* Argument/result bundle for check_fold_consts, which is run under
   do_float_handler so that traps while reading the constants are
   caught instead of crashing the compiler.  */
1996 struct cfc_args
1998 rtx op0, op1; /* Input */
1999 int equal, op0lt, op1lt; /* Output */
2000 int unordered; /* Nonzero if either input is a NaN, or if reading a value trapped before the outputs were computed.  */
2003 static void
2004 check_fold_consts (data)
2005 PTR data;
2007 struct cfc_args *args = (struct cfc_args *) data;
2008 REAL_VALUE_TYPE d0, d1;
2010 /* We may possibly raise an exception while reading the value. */
2011 args->unordered = 1;
2012 REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
2013 REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
2015 /* Comparisons of Inf versus Inf are ordered. */
2016 if (REAL_VALUE_ISNAN (d0)
2017 || REAL_VALUE_ISNAN (d1))
2018 return;
2019 args->equal = REAL_VALUES_EQUAL (d0, d1);
2020 args->op0lt = REAL_VALUES_LESS (d0, d1);
2021 args->op1lt = REAL_VALUES_LESS (d1, d0);
2022 args->unordered = 0;
2025 /* Like simplify_binary_operation except used for relational operators.
2026 MODE is the mode of the operands, not that of the result. If MODE
2027 is VOIDmode, both operands must also be VOIDmode and we compare the
2028 operands in "infinite precision".
2030 If no simplification is possible, this function returns zero. Otherwise,
2031 it returns either const_true_rtx or const0_rtx. */
2034 simplify_relational_operation (code, mode, op0, op1)
2035 enum rtx_code code;
2036 enum machine_mode mode;
2037 rtx op0, op1;
2039 int equal, op0lt, op0ltu, op1lt, op1ltu;
2040 rtx tem;
2041 rtx trueop0;
2042 rtx trueop1;
/* A VOIDmode comparison is only valid when both operands are themselves
   VOIDmode constants ("infinite precision"); anything else is a caller
   error.  */
2044 if (mode == VOIDmode
2045 && (GET_MODE (op0) != VOIDmode
2046 || GET_MODE (op1) != VOIDmode))
2047 abort ();
2049 /* If op0 is a compare, extract the comparison arguments from it. */
2050 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2051 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
/* Look through constant-pool references so pool constants can be
   folded; TRUEOP0/TRUEOP1 are the values to fold, OP0/OP1 what to
   return when no fold applies.  */
2053 trueop0 = avoid_constant_pool_reference (op0);
2054 trueop1 = avoid_constant_pool_reference (op1);
2056 /* We can't simplify MODE_CC values since we don't know what the
2057 actual comparison is. */
2058 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
2059 #ifdef HAVE_cc0
2060 || op0 == cc0_rtx
2061 #endif
2063 return 0;
2065 /* Make sure the constant is second. */
2066 if (swap_commutative_operands_p (trueop0, trueop1))
2068 tem = op0, op0 = op1, op1 = tem;
2069 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2070 code = swap_condition (code);
2073 /* For integer comparisons of A and B maybe we can simplify A - B and can
2074 then simplify a comparison of that with zero. If A and B are both either
2075 a register or a CONST_INT, this can't help; testing for these cases will
2076 prevent infinite recursion here and speed things up.
2078 If CODE is an unsigned comparison, then we can never do this optimization,
2079 because it gives an incorrect result if the subtraction wraps around zero.
2080 ANSI C defines unsigned operations such that they never overflow, and
2081 thus such cases can not be ignored. */
2083 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2084 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2085 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2086 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2087 && code != GTU && code != GEU && code != LTU && code != LEU)
2088 return simplify_relational_operation (signed_condition (code),
2089 mode, tem, const0_rtx)2090;
2091 if (flag_unsafe_math_optimizations && code == ORDERED)
2092 return const_true_rtx;
2094 if (flag_unsafe_math_optimizations && code == UNORDERED)
2095 return const0_rtx;
2097 /* For non-IEEE floating-point, if the two operands are equal, we know the
2098 result. */
2099 if (rtx_equal_p (trueop0, trueop1)
2100 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
2101 || ! FLOAT_MODE_P (GET_MODE (trueop0))
2102 || flag_unsafe_math_optimizations))
2103 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2105 /* If the operands are floating-point constants, see if we can fold
2106 the result. */
2107 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2108 else if (GET_CODE (trueop0) == CONST_DOUBLE
2109 && GET_CODE (trueop1) == CONST_DOUBLE
2110 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2112 struct cfc_args args;
2114 /* Setup input for check_fold_consts() */
2115 args.op0 = trueop0;
2116 args.op1 = trueop1;
/* Run under do_float_handler so FP traps while reading the constants
   are caught; a trap means the comparison must be treated unordered.  */
2119 if (!do_float_handler (check_fold_consts, (PTR) &args))
2120 args.unordered = 1;
2122 if (args.unordered)
2123 switch (code)
2125 case UNEQ:
2126 case UNLT:
2127 case UNGT:
2128 case UNLE:
2129 case UNGE:
2130 case NE:
2131 case UNORDERED:
2132 return const_true_rtx;
2133 case EQ:
2134 case LT:
2135 case GT:
2136 case LE:
2137 case GE:
2138 case LTGT:
2139 case ORDERED:
2140 return const0_rtx;
2141 default:
2142 return 0;
2145 /* Receive output from check_fold_consts() */
2146 equal = args.equal;
2147 op0lt = op0ltu = args.op0lt;
2148 op1lt = op1ltu = args.op1lt;
2150 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2152 /* Otherwise, see if the operands are both integers. */
2153 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2154 && (GET_CODE (trueop0) == CONST_DOUBLE
2155 || GET_CODE (trueop0) == CONST_INT)
2156 && (GET_CODE (trueop1) == CONST_DOUBLE
2157 || GET_CODE (trueop1) == CONST_INT))
2159 int width = GET_MODE_BITSIZE (mode);
2160 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2161 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2163 /* Get the two words comprising each integer constant. */
2164 if (GET_CODE (trueop0) == CONST_DOUBLE)
2166 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2167 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2169 else
2171 l0u = l0s = INTVAL (trueop0);
2172 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2175 if (GET_CODE (trueop1) == CONST_DOUBLE)
2177 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2178 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2180 else
2182 l1u = l1s = INTVAL (trueop1);
2183 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2186 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2187 we have to sign or zero-extend the values. */
2188 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2190 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2191 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2193 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2194 l0s |= ((HOST_WIDE_INT) (-1) << width);
2196 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2197 l1s |= ((HOST_WIDE_INT) (-1) << width);
2199 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2200 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-word compare: signed order looks at the sign-extended high
   words first, unsigned order at the zero-extended ones; the low words
   break ties, compared unsigned in both cases.  */
2202 equal = (h0u == h1u && l0u == l1u);
2203 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2204 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2205 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2206 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2209 /* Otherwise, there are some code-specific tests we can make. */
2210 else
2212 switch (code)
2214 case EQ:
2215 /* References to the frame plus a constant or labels cannot
2216 be zero, but a SYMBOL_REF can due to #pragma weak. */
2217 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2218 || GET_CODE (trueop0) == LABEL_REF)
2219 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2220 /* On some machines, the ap reg can be 0 sometimes. */
2221 && op0 != arg_pointer_rtx
2222 #endif
2224 return const0_rtx;
2225 break;
2227 case NE:
2228 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2229 || GET_CODE (trueop0) == LABEL_REF)
2230 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2231 && op0 != arg_pointer_rtx
2232 #endif
2234 return const_true_rtx;
2235 break;
2237 case GEU:
2238 /* Unsigned values are never negative. */
2239 if (trueop1 == const0_rtx)
2240 return const_true_rtx;
2241 break;
2243 case LTU:
2244 if (trueop1 == const0_rtx)
2245 return const0_rtx;
2246 break;
2248 case LEU:
2249 /* Unsigned values are never greater than the largest
2250 unsigned value. */
2251 if (GET_CODE (trueop1) == CONST_INT
2252 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2253 && INTEGRAL_MODE_P (mode))
2254 return const_true_rtx;
2255 break;
2257 case GTU:
2258 if (GET_CODE (trueop1) == CONST_INT
2259 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2260 && INTEGRAL_MODE_P (mode))
2261 return const0_rtx;
2262 break;
2264 default:
2265 break;
/* No code-specific fold applied; give up.  */
2268 return 0;
2271 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2272 as appropriate. */
2273 switch (code)
2275 case EQ:
2276 case UNEQ:
2277 return equal ? const_true_rtx : const0_rtx;
2278 case NE:
2279 case LTGT:
2280 return ! equal ? const_true_rtx : const0_rtx;
2281 case LT:
2282 case UNLT:
2283 return op0lt ? const_true_rtx : const0_rtx;
2284 case GT:
2285 case UNGT:
2286 return op1lt ? const_true_rtx : const0_rtx;
2287 case LTU:
2288 return op0ltu ? const_true_rtx : const0_rtx;
2289 case GTU:
2290 return op1ltu ? const_true_rtx : const0_rtx;
2291 case LE:
2292 case UNLE:
2293 return equal || op0lt ? const_true_rtx : const0_rtx;
2294 case GE:
2295 case UNGE:
2296 return equal || op1lt ? const_true_rtx : const0_rtx;
2297 case LEU:
2298 return equal || op0ltu ? const_true_rtx : const0_rtx;
2299 case GEU:
2300 return equal || op1ltu ? const_true_rtx : const0_rtx;
2301 case ORDERED:
2302 return const_true_rtx;
2303 case UNORDERED:
2304 return const0_rtx;
2305 default:
2306 abort ();
2310 /* Simplify CODE, an operation with result mode MODE and three operands,
2311 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2312 a constant. Return 0 if no simplification is possible. */
2315 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2316 enum rtx_code code;
2317 enum machine_mode mode, op0_mode;
2318 rtx op0, op1, op2;
2320 unsigned int width = GET_MODE_BITSIZE (mode);
2322 /* VOIDmode means "infinite" precision. */
2323 if (width == 0)
2324 width = HOST_BITS_PER_WIDE_INT;
2326 switch (code)
2328 case SIGN_EXTRACT:
2329 case ZERO_EXTRACT:
2330 if (GET_CODE (op0) == CONST_INT
2331 && GET_CODE (op1) == CONST_INT
2332 && GET_CODE (op2) == CONST_INT
2333 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2334 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2336 /* Extracting a bit-field from a constant */
2337 HOST_WIDE_INT val = INTVAL (op0);
2339 if (BITS_BIG_ENDIAN)
2340 val >>= (GET_MODE_BITSIZE (op0_mode)
2341 - INTVAL (op2) - INTVAL (op1));
2342 else
2343 val >>= INTVAL (op2);
2345 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2347 /* First zero-extend. */
2348 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2349 /* If desired, propagate sign bit. */
2350 if (code == SIGN_EXTRACT
2351 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2352 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2355 /* Clear the bits that don't belong in our mode,
2356 unless they and our sign bit are all one.
2357 So we get either a reasonable negative value or a reasonable
2358 unsigned value for this mode. */
2359 if (width < HOST_BITS_PER_WIDE_INT
2360 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2361 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2362 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2364 return GEN_INT (val);
2366 break;
2368 case IF_THEN_ELSE:
2369 if (GET_CODE (op0) == CONST_INT)
2370 return op0 != const0_rtx ? op1 : op2;
2372 /* Convert a == b ? b : a to "a". */
2373 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2374 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2375 && rtx_equal_p (XEXP (op0, 0), op1)
2376 && rtx_equal_p (XEXP (op0, 1), op2))
2377 return op1;
2378 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2379 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2380 && rtx_equal_p (XEXP (op0, 1), op1)
2381 && rtx_equal_p (XEXP (op0, 0), op2))
2382 return op2;
2383 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2385 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2386 ? GET_MODE (XEXP (op0, 1))
2387 : GET_MODE (XEXP (op0, 0)));
2388 rtx temp;
2389 if (cmp_mode == VOIDmode)
2390 cmp_mode = op0_mode;
2391 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2392 XEXP (op0, 0), XEXP (op0, 1));
2394 /* See if any simplifications were possible. */
2395 if (temp == const0_rtx)
2396 return op2;
2397 else if (temp == const1_rtx)
2398 return op1;
2399 else if (temp)
2400 op0 = temp;
2402 /* Look for happy constants in op1 and op2. */
2403 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2405 HOST_WIDE_INT t = INTVAL (op1);
2406 HOST_WIDE_INT f = INTVAL (op2);
2408 if (t == STORE_FLAG_VALUE && f == 0)
2409 code = GET_CODE (op0);
2410 else if (t == 0 && f == STORE_FLAG_VALUE)
2412 enum rtx_code tmp;
2413 tmp = reversed_comparison_code (op0, NULL_RTX);
2414 if (tmp == UNKNOWN)
2415 break;
2416 code = tmp;
2418 else
2419 break;
2421 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2424 break;
2426 default:
2427 abort ();
2430 return 0;
2433 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2434 Return 0 if no simplifications is possible. */
2436 simplify_subreg (outermode, op, innermode, byte)
2437 rtx op;
2438 unsigned int byte;
2439 enum machine_mode outermode, innermode;
2441 /* Little bit of sanity checking. */
2442 if (innermode == VOIDmode || outermode == VOIDmode
2443 || innermode == BLKmode || outermode == BLKmode)
2444 abort ();
2446 if (GET_MODE (op) != innermode
2447 && GET_MODE (op) != VOIDmode)
2448 abort ();
2450 if (byte % GET_MODE_SIZE (outermode)
2451 || byte >= GET_MODE_SIZE (innermode))
2452 abort ();
2454 if (outermode == innermode && !byte)
2455 return op;
2457 /* Attempt to simplify constant to non-SUBREG expression. */
2458 if (CONSTANT_P (op))
2460 int offset, part;
2461 unsigned HOST_WIDE_INT val = 0;
2463 /* ??? This code is partly redundant with code below, but can handle
2464 the subregs of floats and similar corner cases.
2465 Later it we should move all simplification code here and rewrite
2466 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2467 using SIMPLIFY_SUBREG. */
2468 if (subreg_lowpart_offset (outermode, innermode) == byte)
2470 rtx new = gen_lowpart_if_possible (outermode, op);
2471 if (new)
2472 return new;
2475 /* Similar comment as above apply here. */
2476 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2477 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2478 && GET_MODE_CLASS (outermode) == MODE_INT)
2480 rtx new = constant_subword (op,
2481 (byte / UNITS_PER_WORD),
2482 innermode);
2483 if (new)
2484 return new;
2487 offset = byte * BITS_PER_UNIT;
2488 switch (GET_CODE (op))
2490 case CONST_DOUBLE:
2491 if (GET_MODE (op) != VOIDmode)
2492 break;
2494 /* We can't handle this case yet. */
2495 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2496 return NULL_RTX;
2498 part = offset >= HOST_BITS_PER_WIDE_INT;
2499 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2500 && BYTES_BIG_ENDIAN)
2501 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2502 && WORDS_BIG_ENDIAN))
2503 part = !part;
2504 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2505 offset %= HOST_BITS_PER_WIDE_INT;
2507 /* We've already picked the word we want from a double, so
2508 pretend this is actually an integer. */
2509 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2511 /* FALLTHROUGH */
2512 case CONST_INT:
2513 if (GET_CODE (op) == CONST_INT)
2514 val = INTVAL (op);
2516 /* We don't handle synthetizing of non-integral constants yet. */
2517 if (GET_MODE_CLASS (outermode) != MODE_INT)
2518 return NULL_RTX;
2520 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2522 if (WORDS_BIG_ENDIAN)
2523 offset = (GET_MODE_BITSIZE (innermode)
2524 - GET_MODE_BITSIZE (outermode) - offset);
2525 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2526 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2527 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2528 - 2 * (offset % BITS_PER_WORD));
2531 if (offset >= HOST_BITS_PER_WIDE_INT)
2532 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2533 else
2535 val >>= offset;
2536 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2537 val = trunc_int_for_mode (val, outermode);
2538 return GEN_INT (val);
2540 default:
2541 break;
2545 /* Changing mode twice with SUBREG => just change it once,
2546 or not at all if changing back op starting mode. */
2547 if (GET_CODE (op) == SUBREG)
2549 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2550 int final_offset = byte + SUBREG_BYTE (op);
2551 rtx new;
2553 if (outermode == innermostmode
2554 && byte == 0 && SUBREG_BYTE (op) == 0)
2555 return SUBREG_REG (op);
2557 /* The SUBREG_BYTE represents offset, as if the value were stored
2558 in memory. Irritating exception is paradoxical subreg, where
2559 we define SUBREG_BYTE to be 0. On big endian machines, this
2560 value should be negative. For a moment, undo this exception. */
2561 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2563 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2564 if (WORDS_BIG_ENDIAN)
2565 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2566 if (BYTES_BIG_ENDIAN)
2567 final_offset += difference % UNITS_PER_WORD;
2569 if (SUBREG_BYTE (op) == 0
2570 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2572 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2573 if (WORDS_BIG_ENDIAN)
2574 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2575 if (BYTES_BIG_ENDIAN)
2576 final_offset += difference % UNITS_PER_WORD;
2579 /* See whether resulting subreg will be paradoxical. */
2580 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2582 /* In nonparadoxical subregs we can't handle negative offsets. */
2583 if (final_offset < 0)
2584 return NULL_RTX;
2585 /* Bail out in case resulting subreg would be incorrect. */
2586 if (final_offset % GET_MODE_SIZE (outermode)
2587 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2588 return NULL_RTX;
2590 else
2592 int offset = 0;
2593 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2595 /* In paradoxical subreg, see if we are still looking on lower part.
2596 If so, our SUBREG_BYTE will be 0. */
2597 if (WORDS_BIG_ENDIAN)
2598 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2599 if (BYTES_BIG_ENDIAN)
2600 offset += difference % UNITS_PER_WORD;
2601 if (offset == final_offset)
2602 final_offset = 0;
2603 else
2604 return NULL_RTX;
2607 /* Recurse for futher possible simplifications. */
2608 new = simplify_subreg (outermode, SUBREG_REG (op),
2609 GET_MODE (SUBREG_REG (op)),
2610 final_offset);
2611 if (new)
2612 return new;
2613 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2616 /* SUBREG of a hard register => just change the register number
2617 and/or mode. If the hard register is not valid in that mode,
2618 suppress this simplification. If the hard register is the stack,
2619 frame, or argument pointer, leave this as a SUBREG. */
2621 if (REG_P (op)
2622 && (! REG_FUNCTION_VALUE_P (op)
2623 || ! rtx_equal_function_value_matters)
2624 #ifdef CLASS_CANNOT_CHANGE_MODE
2625 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2626 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2627 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2628 && (TEST_HARD_REG_BIT
2629 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2630 REGNO (op))))
2631 #endif
2632 && REGNO (op) < FIRST_PSEUDO_REGISTER
2633 && ((reload_completed && !frame_pointer_needed)
2634 || (REGNO (op) != FRAME_POINTER_REGNUM
2635 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2636 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2637 #endif
2639 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2640 && REGNO (op) != ARG_POINTER_REGNUM
2641 #endif
2642 && REGNO (op) != STACK_POINTER_REGNUM)
2644 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2647 /* ??? We do allow it if the current REG is not valid for
2648 its mode. This is a kludge to work around how float/complex
2649 arguments are passed on 32-bit Sparc and should be fixed. */
2650 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2651 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2653 rtx x = gen_rtx_REG (outermode, final_regno);
2655 /* Propagate original regno. We don't have any way to specify
2656 the offset inside orignal regno, so do so only for lowpart.
2657 The information is used only by alias analysis that can not
2658 grog partial register anyway. */
2660 if (subreg_lowpart_offset (outermode, innermode) == byte)
2661 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2662 return x;
2666 /* If we have a SUBREG of a register that we are replacing and we are
2667 replacing it with a MEM, make a new MEM and try replacing the
2668 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2669 or if we would be widening it. */
2671 if (GET_CODE (op) == MEM
2672 && ! mode_dependent_address_p (XEXP (op, 0))
2673 /* Allow splitting of volatile memory references in case we don't
2674 have instruction to move the whole thing. */
2675 && (! MEM_VOLATILE_P (op)
2676 || ! have_insn_for (SET, innermode))
2677 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2678 return adjust_address_nv (op, outermode, byte);
2680 /* Handle complex values represented as CONCAT
2681 of real and imaginary part. */
2682 if (GET_CODE (op) == CONCAT)
2684 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2685 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2686 unsigned int final_offset;
2687 rtx res;
2689 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2690 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2691 if (res)
2692 return res;
2693 /* We can at least simplify it by referring directly to the relevant part. */
2694 return gen_rtx_SUBREG (outermode, part, final_offset);
2697 return NULL_RTX;
2699 /* Make a SUBREG operation or equivalent if it folds. */
2702 simplify_gen_subreg (outermode, op, innermode, byte)
2703 rtx op;
2704 unsigned int byte;
2705 enum machine_mode outermode, innermode;
2707 rtx new;
2708 /* Little bit of sanity checking. */
2709 if (innermode == VOIDmode || outermode == VOIDmode
2710 || innermode == BLKmode || outermode == BLKmode)
2711 abort ();
2713 if (GET_MODE (op) != innermode
2714 && GET_MODE (op) != VOIDmode)
2715 abort ();
2717 if (byte % GET_MODE_SIZE (outermode)
2718 || byte >= GET_MODE_SIZE (innermode))
2719 abort ();
2721 if (GET_CODE (op) == QUEUED)
2722 return NULL_RTX;
2724 new = simplify_subreg (outermode, op, innermode, byte);
2725 if (new)
2726 return new;
2728 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2729 return NULL_RTX;
2731 return gen_rtx_SUBREG (outermode, op, byte);
2733 /* Simplify X, an rtx expression.
2735 Return the simplified expression or NULL if no simplifications
2736 were possible.
2738 This is the preferred entry point into the simplification routines;
2739 however, we still allow passes to call the more specific routines.
2741 Right now GCC has three (yes, three) major bodies of RTL simplification
2742 code that need to be unified.
2744 1. fold_rtx in cse.c. This code uses various CSE specific
2745 information to aid in RTL simplification.
2747 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2748 it uses combine specific information to aid in RTL
2749 simplification.
2751 3. The routines in this file.
2754 Long term we want to only have one body of simplification code; to
2755 get to that state I recommend the following steps:
2757 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2758 which are not pass dependent state into these routines.
2760 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2761 use this routine whenever possible.
2763 3. Allow for pass dependent state to be provided to these
2764 routines and add simplifications based on the pass dependent
2765 state. Remove code from cse.c & combine.c that becomes
2766 redundant/dead.
2768 It will take time, but ultimately the compiler will be easier to
2769 maintain and improve. It's totally silly that when we add a
2770 simplification that it needs to be added to 4 places (3 for RTL
2771 simplification and 1 for tree simplification). */
2774 simplify_rtx (x)
2775 rtx x;
2777 enum rtx_code code = GET_CODE (x);
2778 enum machine_mode mode = GET_MODE (x);
2780 switch (GET_RTX_CLASS (code))
2782 case '1':
2783 return simplify_unary_operation (code, mode,
2784 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2785 case 'c':
2786 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2788 rtx tem;
2790 tem = XEXP (x, 0);
2791 XEXP (x, 0) = XEXP (x, 1);
2792 XEXP (x, 1) = tem;
2793 return simplify_binary_operation (code, mode,
2794 XEXP (x, 0), XEXP (x, 1));
2797 case '2':
2798 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2800 case '3':
2801 case 'b':
2802 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2803 XEXP (x, 0), XEXP (x, 1),
2804 XEXP (x, 2));
2806 case '<':
2807 return simplify_relational_operation (code,
2808 ((GET_MODE (XEXP (x, 0))
2809 != VOIDmode)
2810 ? GET_MODE (XEXP (x, 0))
2811 : GET_MODE (XEXP (x, 1))),
2812 XEXP (x, 0), XEXP (x, 1));
2813 case 'x':
2814 /* The only case we try to handle is a SUBREG. */
2815 if (code == SUBREG)
2816 return simplify_gen_subreg (mode, SUBREG_REG (x),
2817 GET_MODE (SUBREG_REG (x)),
2818 SUBREG_BYTE (x));
2819 return NULL;
2820 default:
2821 return NULL;