/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"

#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   routines in cse.c.  Until then, do not change these macros without
   also changing the copy in cse.c.  */
#define FIXED_BASE_PLUS_P(X)                                    \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx    \
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx                             \
   || (X) == virtual_incoming_args_rtx                          \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx                     \
           || XEXP (X, 0) == hard_frame_pointer_rtx             \
           || (XEXP (X, 0) == arg_pointer_rtx                   \
               && fixed_regs[ARG_POINTER_REGNUM])               \
           || XEXP (X, 0) == virtual_stack_vars_rtx             \
           || XEXP (X, 0) == virtual_incoming_args_rtx))        \
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P; however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)                                  \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx    \
   || (X) == virtual_stack_vars_rtx                             \
   || (X) == virtual_incoming_args_rtx                          \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx                     \
           || XEXP (X, 0) == hard_frame_pointer_rtx             \
           || (XEXP (X, 0) == arg_pointer_rtx                   \
               && fixed_regs[ARG_POINTER_REGNUM])               \
           || XEXP (X, 0) == virtual_stack_vars_rtx             \
           || XEXP (X, 0) == virtual_incoming_args_rtx))        \
   || (X) == stack_pointer_rtx                                  \
   || (X) == virtual_stack_dynamic_rtx                          \
   || (X) == virtual_outgoing_args_rtx                          \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx                     \
           || XEXP (X, 0) == virtual_stack_dynamic_rtx          \
           || XEXP (X, 0) == virtual_outgoing_args_rtx))        \
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
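/* For example, the pair for (const_int 1) is low == 1 with
   high == HWI_SIGN_EXTEND (1) == 0, while the pair for (const_int -5)
   is low == -5 with high == HWI_SIGN_EXTEND (-5) == -1, so the
   two-word value reads as the same negative number.  */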
static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
                                                    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
                                        enum machine_mode, rtx,
                                        rtx, int));
static void check_fold_consts PARAMS ((PTR));
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
static void simplify_unary_real PARAMS ((PTR));
static void simplify_binary_real PARAMS ((PTR));
#endif
static void simplify_binary_is2orm1 PARAMS ((PTR));
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode));
}
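
/* For example, in QImode the most negative value is -128; negating it
   gives 128, which trunc_int_for_mode wraps back to -128, keeping the
   result representable in the mode instead of overflowing.  */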

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    return simplify_plus_minus (code, mode, op0, op1, 1);
  else
    return gen_rtx_fmt_ee (code, mode, op0, op1);
}
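
/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds
   to X itself, while a commutative operation whose first operand is a
   constant is reordered so the constant comes second before any
   folding is tried.  */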

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

        return
          simplify_gen_relational (code, mode,
                                   (op_mode != VOIDmode
                                    ? op_mode
                                    : GET_MODE (op0) != VOIDmode
                                    ? GET_MODE (op0)
                                    : GET_MODE (op1)),
                                   op0, op1);
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    default:
      if (GET_CODE (x) == MEM)
        return
          replace_equiv_address_nv (x,
                                    simplify_replace_rtx (XEXP (x, 0),
                                                          old, new));

      return x;
    }
  return x;
}
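
/* For example, replacing (reg R) by (const_int 3) in
   (plus:SI (reg R) (const_int 4)) recurses through the PLUS, and the
   simplify_gen_binary call above then folds the whole expression to
   (const_int 7).  */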

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Subroutine of simplify_unary_operation, called via do_float_handler.
   Handles simplification of unary ops on floating point values.  */
struct simplify_unary_real_args
{
  rtx operand;
  rtx result;
  enum machine_mode mode;
  enum rtx_code code;
  bool want_integer;
};
#define REAL_VALUE_ABS(d_) \
   (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))

static void
simplify_unary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE d;

  struct simplify_unary_real_args *args =
    (struct simplify_unary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);

  if (args->want_integer)
    {
      HOST_WIDE_INT i;

      switch (args->code)
        {
        case FIX:             i = REAL_VALUE_FIX (d);            break;
        case UNSIGNED_FIX:    i = REAL_VALUE_UNSIGNED_FIX (d);   break;
        default:
          abort ();
        }
      args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
    }
  else
    {
      switch (args->code)
        {
        case SQRT:
          /* We don't attempt to optimize this.  */
          args->result = 0;
          return;

        case ABS:             d = REAL_VALUE_ABS (d);                   break;
        case NEG:             d = REAL_VALUE_NEGATE (d);                break;
        case FLOAT_TRUNCATE:  d = real_value_truncate (args->mode, d);  break;
        case FLOAT_EXTEND:    /* All this does is change the mode.  */  break;
        case FIX:             d = REAL_VALUE_RNDZINT (d);               break;
        case UNSIGNED_FIX:    d = REAL_VALUE_UNSIGNED_RNDZINT (d);      break;
        default:
          abort ();
        }
      args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
    }
}
#endif

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
        {
          d = (double) (~ hv);
          d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
                * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
          d += (double) (unsigned HOST_WIDE_INT) (~ lv);
          d = (- d - 1.0);
        }
      else
        {
          d = (double) hv;
          d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
                * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
          d += (double) (unsigned HOST_WIDE_INT) lv;
        }
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else

      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
            * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get the low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
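
  /* For example, (zero_extend:HI (const_int -1)) with OP_MODE QImode
     folds to (const_int 255) here, while (sign_extend:HI (const_int 255))
     with OP_MODE QImode folds to (const_int -1).  */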

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
          else
            lv = exact_log2 (l1 & (-l1)) + 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = false;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
        return args.result;

      return 0;
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = true;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
        return args.result;

      return 0;
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return gen_rtx_fmt_ee (reversed,
                                   op_mode, XEXP (op, 0), XEXP (op, 1));
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Subroutine of simplify_binary_operation, called via do_float_handler.
   Handles simplification of binary ops on floating point values.  */
struct simplify_binary_real_args
{
  rtx trueop0, trueop1;
  rtx result;
  enum rtx_code code;
  enum machine_mode mode;
};

static void
simplify_binary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE f0, f1, value;
  struct simplify_binary_real_args *args =
    (struct simplify_binary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
  REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
  f0 = real_value_truncate (args->mode, f0);
  f1 = real_value_truncate (args->mode, f1);

#ifdef REAL_ARITHMETIC
#ifndef REAL_INFINITY
  if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
    {
      args->result = 0;
      return;
    }
#endif
  REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);
#else
  switch (args->code)
    {
    case PLUS:
      value = f0 + f1;
      break;
    case MINUS:
      value = f0 - f1;
      break;
    case MULT:
      value = f0 * f1;
      break;
    case DIV:
#ifndef REAL_INFINITY
      if (f1 == 0)
        {
          /* This function returns void, so signal "no simplification"
             through the result slot rather than with a return value.  */
          args->result = 0;
          return;
        }
#endif
      value = f0 / f1;
      break;
    case SMIN:
      value = MIN (f0, f1);
      break;
    case SMAX:
      value = MAX (f0, f1);
      break;
    default:
      abort ();
    }
#endif

  value = real_value_truncate (args->mode, value);
  args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
}
#endif

/* Another subroutine called via do_float_handler.  This one tests
   the floating point value given against 2. and -1.  */
struct simplify_binary_is2orm1_args
{
  rtx value;
  bool is_2;
  bool is_m1;
};

static void
simplify_binary_is2orm1 (p)
     PTR p;
{
  REAL_VALUE_TYPE d;
  struct simplify_binary_is2orm1_args *args =
    (struct simplify_binary_is2orm1_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
  args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
  args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
}

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      struct simplify_binary_real_args args;
      args.trueop0 = trueop0;
      args.trueop1 = trueop1;
      args.mode = mode;
      args.code = code;

      if (do_float_handler (simplify_binary_real, (PTR) &args))
        return args.result;
      return 0;
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* ... fall through ...  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* In IEEE floating point, x+0 is not the same as x.  Similarly
             for the other optimizations below.  */
          if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
              && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
            break;

          if (trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a  */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return gen_rtx_NEG (mode, XEXP (op0, 0));

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;
          break;
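
          /* For example, (plus (mult X 3) X) becomes (mult X 4) above,
             whereas (plus (ashift X 2) X), whose coefficients sum to 5,
             is left alone: folding it would create a MULT that was not
             there before.  */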

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }

          break;

        case MINUS:
          /* None of these optimizations can be done for IEEE
             floating point.  */
          if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
              && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
            break;

          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  */
          if (trueop0 == CONST0_RTX (mode))
            return gen_rtx_NEG (mode, op1);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return gen_rtx_NOT (mode, op1);

          /* Subtracting 0 has no effect.  */
          if (trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y)  */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 1)));
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 0)));
            }
          break;
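
          /* For example, (minus (mult X 3) X) becomes (mult X 2) above,
             and (minus X (const_int 5)) is canonicalized to
             (plus X (const_int -5)).  */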

        case MULT:
          if (trueop1 == constm1_rtx)
            {
              tem = simplify_unary_operation (NEG, mode, op0, mode);

              return tem ? tem : gen_rtx_NEG (mode, op0);
            }

          /* In IEEE floating point, x*0 is not always 0.  */
          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for NaNs.
             However, ANSI says we can drop signals,
             so we can do this anyway.  */
          if (trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
            {
              struct simplify_binary_is2orm1_args args;

              args.value = trueop1;
              if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
                return 0;

              /* x*2 is x+x and x*(-1) is -x  */
              if (args.is_2 && GET_MODE (op0) == mode)
                return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

              else if (args.is_m1 && GET_MODE (op0) == mode)
                return gen_rtx_NEG (mode, op0);
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return gen_rtx_NOT (mode, op0);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

          /* ... fall through ...  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* In IEEE floating point, 0/x is not always 0.  */
          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
#if defined (REAL_ARITHMETIC)
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  return gen_rtx_MULT (mode, op0,
                                       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
                  return
                    gen_rtx_MULT (mode, op0,
                                  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
                }
            }
#endif
          break;
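
          /* For example, with -funsafe-math-optimizations
             (div:DF X (const_double 4.0)) becomes
             (mult:DF X (const_double 0.25)) above.  */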

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

          /* ... fall through ...  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* ... fall through ...  */

        case ASHIFT:
        case ASHIFTRT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          abort ();
        }

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If the shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
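
/* For example, (ashiftrt:SI (const_int -8) (const_int 1)) folds to
   (const_int -4) here: the sign bit is re-extended by hand above so the
   result does not depend on whether the host's >> is arithmetic.  */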

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                {
                  if (force)
                    abort ();
                  return NULL_RTX;
                }

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
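
/* For instance, given (minus (plus X 3) (plus X 1)) this expands the
   operand list to X, 3, -X, -1, cancels X against -X, combines the
   constants, and returns (const_int 2).  */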

struct cfc_args
{
  rtx op0, op1;                 /* Input */
  int equal, op0lt, op1lt;      /* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
2023 /* Like simplify_binary_operation except used for relational operators.
2024 MODE is the mode of the operands, not that of the result. If MODE
2025 is VOIDmode, both operands must also be VOIDmode and we compare the
2026 operands in "infinite precision".
2028 If no simplification is possible, this function returns zero. Otherwise,
2029 it returns either const_true_rtx or const0_rtx. */
2032 simplify_relational_operation (code, mode, op0, op1)
2033 enum rtx_code code;
2034 enum machine_mode mode;
2035 rtx op0, op1;
2037 int equal, op0lt, op0ltu, op1lt, op1ltu;
2038 rtx tem;
2039 rtx trueop0;
2040 rtx trueop1;
2042 if (mode == VOIDmode
2043 && (GET_MODE (op0) != VOIDmode
2044 || GET_MODE (op1) != VOIDmode))
2045 abort ();
2047 /* If op0 is a compare, extract the comparison arguments from it. */
2048 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2049 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2051 trueop0 = avoid_constant_pool_reference (op0);
2052 trueop1 = avoid_constant_pool_reference (op1);
2054 /* We can't simplify MODE_CC values since we don't know what the
2055 actual comparison is. */
2056 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
2057 #ifdef HAVE_cc0
2058 || op0 == cc0_rtx
2059 #endif
2061 return 0;
2063 /* Make sure the constant is second. */
2064 if (swap_commutative_operands_p (trueop0, trueop1))
2066 tem = op0, op0 = op1, op1 = tem;
2067 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2068 code = swap_condition (code);
2071 /* For integer comparisons of A and B we may be able to simplify A - B
2072 and then simplify a comparison of that with zero. If A and B are both
2073 either a register or a CONST_INT, this can't help; testing for these
2074 cases will prevent infinite recursion here and speed things up.
2076 If CODE is an unsigned comparison, then we can never do this
2077 optimization, because it gives an incorrect result if the subtraction
2078 wraps around zero. ANSI C defines unsigned operations such that they
2079 never overflow, and thus such cases cannot be ignored. */
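/* For example, with 8-bit operands, 200 LTU 1 is false, but (minus 200 1)
   is 199, which is -57 when read as a signed 8-bit value, so a signed
   comparison of the difference against zero would wrongly report that
   200 is unsigned-less than 1.  */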
2081 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2082 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2083 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2084 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2085 && code != GTU && code != GEU && code != LTU && code != LEU)
2086 return simplify_relational_operation (signed_condition (code),
2087 mode, tem, const0_rtx);
2089 if (flag_unsafe_math_optimizations && code == ORDERED)
2090 return const_true_rtx;
2092 if (flag_unsafe_math_optimizations && code == UNORDERED)
2093 return const0_rtx;
2095 /* If the two operands are equal and NaNs are not a concern (non-IEEE
2096 float format, a non-float mode, or unsafe math), we know the result. */
2097 if (rtx_equal_p (trueop0, trueop1)
2098 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
2099 || ! FLOAT_MODE_P (GET_MODE (trueop0))
2100 || flag_unsafe_math_optimizations))
2101 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2103 /* If the operands are floating-point constants, see if we can fold
2104 the result. */
2105 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2106 else if (GET_CODE (trueop0) == CONST_DOUBLE
2107 && GET_CODE (trueop1) == CONST_DOUBLE
2108 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2110 struct cfc_args args;
2112 /* Set up the input for check_fold_consts (). */
2113 args.op0 = trueop0;
2114 args.op1 = trueop1;
2117 if (!do_float_handler (check_fold_consts, (PTR) &args))
2118 args.unordered = 1;
2120 if (args.unordered)
2121 switch (code)
2123 case UNEQ:
2124 case UNLT:
2125 case UNGT:
2126 case UNLE:
2127 case UNGE:
2128 case NE:
2129 case UNORDERED:
2130 return const_true_rtx;
2131 case EQ:
2132 case LT:
2133 case GT:
2134 case LE:
2135 case GE:
2136 case LTGT:
2137 case ORDERED:
2138 return const0_rtx;
2139 default:
2140 return 0;
2143 /* Receive the output from check_fold_consts (). */
2144 equal = args.equal;
2145 op0lt = op0ltu = args.op0lt;
2146 op1lt = op1ltu = args.op1lt;
2148 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2150 /* Otherwise, see if the operands are both integers. */
2151 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2152 && (GET_CODE (trueop0) == CONST_DOUBLE
2153 || GET_CODE (trueop0) == CONST_INT)
2154 && (GET_CODE (trueop1) == CONST_DOUBLE
2155 || GET_CODE (trueop1) == CONST_INT))
2157 int width = GET_MODE_BITSIZE (mode);
2158 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2159 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2161 /* Get the two words comprising each integer constant. */
2162 if (GET_CODE (trueop0) == CONST_DOUBLE)
2164 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2165 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2167 else
2169 l0u = l0s = INTVAL (trueop0);
2170 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2173 if (GET_CODE (trueop1) == CONST_DOUBLE)
2175 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2176 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2178 else
2180 l1u = l1s = INTVAL (trueop1);
2181 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2184 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2185 we have to sign or zero-extend the values. */
2186 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2188 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2189 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2191 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2192 l0s |= ((HOST_WIDE_INT) (-1) << width);
2194 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2195 l1s |= ((HOST_WIDE_INT) (-1) << width);
2197 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2198 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2200 equal = (h0u == h1u && l0u == l1u);
2201 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2202 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2203 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2204 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2207 /* Otherwise, there are some code-specific tests we can make. */
2208 else
2210 switch (code)
2212 case EQ:
2213 /* References to the frame plus a constant, or labels, cannot
2214 be zero, but a SYMBOL_REF can be, because of #pragma weak. */
2215 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2216 || GET_CODE (trueop0) == LABEL_REF)
2217 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2218 /* On some machines, the arg pointer register can sometimes be 0. */
2219 && op0 != arg_pointer_rtx
2220 #endif
2222 return const0_rtx;
2223 break;
2225 case NE:
2226 if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
2227 || GET_CODE (trueop0) == LABEL_REF)
2228 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2229 && op0 != arg_pointer_rtx
2230 #endif
2232 return const_true_rtx;
2233 break;
2235 case GEU:
2236 /* Unsigned values are never negative. */
2237 if (trueop1 == const0_rtx)
2238 return const_true_rtx;
2239 break;
2241 case LTU:
2242 if (trueop1 == const0_rtx)
2243 return const0_rtx;
2244 break;
2246 case LEU:
2247 /* Unsigned values are never greater than the mode's largest
2248 unsigned value. */
2249 if (GET_CODE (trueop1) == CONST_INT
2250 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2251 && INTEGRAL_MODE_P (mode))
2252 return const_true_rtx;
2253 break;
2255 case GTU:
2256 if (GET_CODE (trueop1) == CONST_INT
2257 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2258 && INTEGRAL_MODE_P (mode))
2259 return const0_rtx;
2260 break;
2262 default:
2263 break;
2266 return 0;
2269 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2270 as appropriate. */
2271 switch (code)
2273 case EQ:
2274 case UNEQ:
2275 return equal ? const_true_rtx : const0_rtx;
2276 case NE:
2277 case LTGT:
2278 return ! equal ? const_true_rtx : const0_rtx;
2279 case LT:
2280 case UNLT:
2281 return op0lt ? const_true_rtx : const0_rtx;
2282 case GT:
2283 case UNGT:
2284 return op1lt ? const_true_rtx : const0_rtx;
2285 case LTU:
2286 return op0ltu ? const_true_rtx : const0_rtx;
2287 case GTU:
2288 return op1ltu ? const_true_rtx : const0_rtx;
2289 case LE:
2290 case UNLE:
2291 return equal || op0lt ? const_true_rtx : const0_rtx;
2292 case GE:
2293 case UNGE:
2294 return equal || op1lt ? const_true_rtx : const0_rtx;
2295 case LEU:
2296 return equal || op0ltu ? const_true_rtx : const0_rtx;
2297 case GEU:
2298 return equal || op1ltu ? const_true_rtx : const0_rtx;
2299 case ORDERED:
2300 return const_true_rtx;
2301 case UNORDERED:
2302 return const0_rtx;
2303 default:
2304 abort ();
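/* Illustrative sketch, not part of the original source: two constant
   foldings performed by the routine above.  Kept under #if 0 so it
   never builds.  */
#if 0
static void
example_fold_relational ()
{
  /* (eq:SI 4 4) folds to const_true_rtx via the integer branch.  */
  if (simplify_relational_operation (EQ, SImode,
                                     GEN_INT (4), GEN_INT (4))
      != const_true_rtx)
    abort ();

  /* (ltu:SI x 0) folds to const0_rtx: no unsigned value is less
     than zero.  */
  if (simplify_relational_operation (LTU, SImode,
                                     gen_reg_rtx (SImode), const0_rtx)
      != const0_rtx)
    abort ();
}
#endif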
2308 /* Simplify CODE, an operation with result mode MODE and three operands,
2309 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2310 a constant. Return 0 if no simplification is possible. */
2312 rtx
2313 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2314 enum rtx_code code;
2315 enum machine_mode mode, op0_mode;
2316 rtx op0, op1, op2;
2318 unsigned int width = GET_MODE_BITSIZE (mode);
2320 /* VOIDmode means "infinite" precision. */
2321 if (width == 0)
2322 width = HOST_BITS_PER_WIDE_INT;
2324 switch (code)
2326 case SIGN_EXTRACT:
2327 case ZERO_EXTRACT:
2328 if (GET_CODE (op0) == CONST_INT
2329 && GET_CODE (op1) == CONST_INT
2330 && GET_CODE (op2) == CONST_INT
2331 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2332 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2334 /* Extracting a bit-field from a constant. */
2335 HOST_WIDE_INT val = INTVAL (op0);
2337 if (BITS_BIG_ENDIAN)
2338 val >>= (GET_MODE_BITSIZE (op0_mode)
2339 - INTVAL (op2) - INTVAL (op1));
2340 else
2341 val >>= INTVAL (op2);
2343 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2345 /* First zero-extend. */
2346 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2347 /* If desired, propagate sign bit. */
2348 if (code == SIGN_EXTRACT
2349 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2350 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2353 /* Clear the bits that don't belong in our mode,
2354 unless they and our sign bit are all one.
2355 So we get either a reasonable negative value or a reasonable
2356 unsigned value for this mode. */
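/* E.g. with WIDTH == 8, VAL == 0x123 does not have all of its high bits
   and sign bit set, so it is masked down to 0x23, while VAL == -1 is
   left alone as a reasonable negative value.  */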
2357 if (width < HOST_BITS_PER_WIDE_INT
2358 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2359 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2360 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2362 return GEN_INT (val);
2364 break;
2366 case IF_THEN_ELSE:
2367 if (GET_CODE (op0) == CONST_INT)
2368 return op0 != const0_rtx ? op1 : op2;
2370 /* Convert both a != b ? a : b and a == b ? b : a to "a". */
2371 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2372 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2373 && rtx_equal_p (XEXP (op0, 0), op1)
2374 && rtx_equal_p (XEXP (op0, 1), op2))
2375 return op1;
2376 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2377 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
2378 && rtx_equal_p (XEXP (op0, 1), op1)
2379 && rtx_equal_p (XEXP (op0, 0), op2))
2380 return op2;
2381 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2383 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2384 ? GET_MODE (XEXP (op0, 1))
2385 : GET_MODE (XEXP (op0, 0)));
2386 rtx temp;
2387 if (cmp_mode == VOIDmode)
2388 cmp_mode = op0_mode;
2389 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2390 XEXP (op0, 0), XEXP (op0, 1));
2392 /* See if any simplifications were possible. */
2393 if (temp == const0_rtx)
2394 return op2;
2395 else if (temp == const1_rtx)
2396 return op1;
2397 else if (temp)
2398 op0 = temp;
2400 /* See whether OP1 and OP2 are constants that let us replace the IF_THEN_ELSE with a bare comparison. */
2401 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2403 HOST_WIDE_INT t = INTVAL (op1);
2404 HOST_WIDE_INT f = INTVAL (op2);
2406 if (t == STORE_FLAG_VALUE && f == 0)
2407 code = GET_CODE (op0);
2408 else if (t == 0 && f == STORE_FLAG_VALUE)
2410 enum rtx_code tmp;
2411 tmp = reversed_comparison_code (op0, NULL_RTX);
2412 if (tmp == UNKNOWN)
2413 break;
2414 code = tmp;
2416 else
2417 break;
2419 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2422 break;
2424 default:
2425 abort ();
2428 return 0;
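/* Illustrative sketch, not part of the original source: an IF_THEN_ELSE
   whose condition is already a constant folds to one of its arms, as
   handled above.  Kept under #if 0 so it never builds.  */
#if 0
static void
example_fold_if_then_else ()
{
  rtx t = GEN_INT (10), f = GEN_INT (20);

  /* A nonzero constant condition selects the "then" arm.  */
  if (simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
                                  const1_rtx, t, f) != t)
    abort ();
}
#endif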
2431 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2432 Return 0 if no simplification is possible. */
2433 rtx
2434 simplify_subreg (outermode, op, innermode, byte)
2435 rtx op;
2436 unsigned int byte;
2437 enum machine_mode outermode, innermode;
2439 /* A little bit of sanity checking. */
2440 if (innermode == VOIDmode || outermode == VOIDmode
2441 || innermode == BLKmode || outermode == BLKmode)
2442 abort ();
2444 if (GET_MODE (op) != innermode
2445 && GET_MODE (op) != VOIDmode)
2446 abort ();
2448 if (byte % GET_MODE_SIZE (outermode)
2449 || byte >= GET_MODE_SIZE (innermode))
2450 abort ();
2452 if (outermode == innermode && !byte)
2453 return op;
2455 /* Attempt to simplify a constant to a non-SUBREG expression. */
2456 if (CONSTANT_P (op))
2458 int offset, part;
2459 unsigned HOST_WIDE_INT val = 0;
2461 /* ??? This code is partly redundant with code below, but can handle
2462 the subregs of floats and similar corner cases.
2463 Later we should move all simplification code here and rewrite
2464 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2465 using SIMPLIFY_SUBREG. */
2466 if (subreg_lowpart_offset (outermode, innermode) == byte)
2468 rtx new = gen_lowpart_if_possible (outermode, op);
2469 if (new)
2470 return new;
2473 /* A similar comment to the one above applies here. */
2474 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2475 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2476 && GET_MODE_CLASS (outermode) == MODE_INT)
2478 rtx new = constant_subword (op,
2479 (byte / UNITS_PER_WORD),
2480 innermode);
2481 if (new)
2482 return new;
2485 offset = byte * BITS_PER_UNIT;
2486 switch (GET_CODE (op))
2488 case CONST_DOUBLE:
2489 if (GET_MODE (op) != VOIDmode)
2490 break;
2492 /* We can't handle this case yet. */
2493 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2494 return NULL_RTX;
2496 part = offset >= HOST_BITS_PER_WIDE_INT;
2497 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2498 && BYTES_BIG_ENDIAN)
2499 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2500 && WORDS_BIG_ENDIAN))
2501 part = !part;
2502 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2503 offset %= HOST_BITS_PER_WIDE_INT;
2505 /* We've already picked the word we want from a double, so
2506 pretend this is actually an integer. */
2507 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2509 /* FALLTHROUGH */
2510 case CONST_INT:
2511 if (GET_CODE (op) == CONST_INT)
2512 val = INTVAL (op);
2514 /* We don't handle synthesizing non-integral constants yet. */
2515 if (GET_MODE_CLASS (outermode) != MODE_INT)
2516 return NULL_RTX;
2518 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2520 if (WORDS_BIG_ENDIAN)
2521 offset = (GET_MODE_BITSIZE (innermode)
2522 - GET_MODE_BITSIZE (outermode) - offset);
2523 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2524 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2525 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2526 - 2 * (offset % BITS_PER_WORD));
2529 if (offset >= HOST_BITS_PER_WIDE_INT)
2530 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2531 else
2533 val >>= offset;
2534 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2535 val = trunc_int_for_mode (val, outermode);
2536 return GEN_INT (val);
2538 default:
2539 break;
2543 /* Changing mode twice with SUBREG => just change it once,
2544 or not at all if changing back to the starting mode. */
2545 if (GET_CODE (op) == SUBREG)
2547 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2548 int final_offset = byte + SUBREG_BYTE (op);
2549 rtx new;
2551 if (outermode == innermostmode
2552 && byte == 0 && SUBREG_BYTE (op) == 0)
2553 return SUBREG_REG (op);
2555 /* The SUBREG_BYTE represents the offset, as if the value were stored
2556 in memory. The irritating exception is the paradoxical subreg, where
2557 we define SUBREG_BYTE to be 0; on big-endian machines the value would
2558 otherwise be negative. For the moment, undo this exception. */
2559 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2561 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2562 if (WORDS_BIG_ENDIAN)
2563 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2564 if (BYTES_BIG_ENDIAN)
2565 final_offset += difference % UNITS_PER_WORD;
2567 if (SUBREG_BYTE (op) == 0
2568 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2570 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2571 if (WORDS_BIG_ENDIAN)
2572 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2573 if (BYTES_BIG_ENDIAN)
2574 final_offset += difference % UNITS_PER_WORD;
2577 /* See whether the resulting subreg will be paradoxical. */
2578 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2580 /* In a nonparadoxical subreg we can't handle negative offsets. */
2581 if (final_offset < 0)
2582 return NULL_RTX;
2583 /* Bail out in case the resulting subreg would be incorrect. */
2584 if (final_offset % GET_MODE_SIZE (outermode)
2585 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2586 return NULL_RTX;
2588 else
2590 int offset = 0;
2591 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2593 /* For a paradoxical subreg, see if we are still looking at the lower
2594 part. If so, our SUBREG_BYTE will be 0. */
2595 if (WORDS_BIG_ENDIAN)
2596 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2597 if (BYTES_BIG_ENDIAN)
2598 offset += difference % UNITS_PER_WORD;
2599 if (offset == final_offset)
2600 final_offset = 0;
2601 else
2602 return NULL_RTX;
2605 /* Recurse for further possible simplifications. */
2606 new = simplify_subreg (outermode, SUBREG_REG (op),
2607 GET_MODE (SUBREG_REG (op)),
2608 final_offset);
2609 if (new)
2610 return new;
2611 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2614 /* SUBREG of a hard register => just change the register number
2615 and/or mode. If the hard register is not valid in that mode,
2616 suppress this simplification. If the hard register is the stack,
2617 frame, or argument pointer, leave this as a SUBREG. */
2619 if (REG_P (op)
2620 && (! REG_FUNCTION_VALUE_P (op)
2621 || ! rtx_equal_function_value_matters)
2622 #ifdef CLASS_CANNOT_CHANGE_MODE
2623 && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
2624 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2625 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
2626 && (TEST_HARD_REG_BIT
2627 (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
2628 REGNO (op))))
2629 #endif
2630 && REGNO (op) < FIRST_PSEUDO_REGISTER
2631 && ((reload_completed && !frame_pointer_needed)
2632 || (REGNO (op) != FRAME_POINTER_REGNUM
2633 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2634 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2635 #endif
2637 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2638 && REGNO (op) != ARG_POINTER_REGNUM
2639 #endif
2640 && REGNO (op) != STACK_POINTER_REGNUM)
2642 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2645 /* ??? We do allow it if the current REG is not valid for
2646 its mode. This is a kludge to work around how float/complex
2647 arguments are passed on 32-bit SPARC and should be fixed. */
2648 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2649 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2651 rtx x = gen_rtx_REG (outermode, final_regno);
2653 /* Propagate the original regno. We don't have any way to specify
2654 the offset inside the original regno, so do so only for the lowpart.
2655 The information is used only by alias analysis, which cannot
2656 grok partial registers anyway. */
2658 if (subreg_lowpart_offset (outermode, innermode) == byte)
2659 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2660 return x;
2664 /* If we have a SUBREG of a register that we are replacing and we are
2665 replacing it with a MEM, make a new MEM and try replacing the
2666 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2667 or if we would be widening it. */
2669 if (GET_CODE (op) == MEM
2670 && ! mode_dependent_address_p (XEXP (op, 0))
2671 /* Allow splitting of volatile memory references in case we don't
2672 have an instruction to move the whole thing. */
2673 && (! MEM_VOLATILE_P (op)
2674 || ! have_insn_for (SET, innermode))
2675 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2676 return adjust_address_nv (op, outermode, byte);
2678 /* Handle complex values represented as CONCAT
2679 of real and imaginary parts. */
2680 if (GET_CODE (op) == CONCAT)
2682 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2683 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2684 unsigned int final_offset;
2685 rtx res;
2687 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2688 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2689 if (res)
2690 return res;
2691 /* We can at least simplify it by referring directly to the relevant part. */
2692 return gen_rtx_SUBREG (outermode, part, final_offset);
2695 return NULL_RTX;
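/* Illustrative sketch, not part of the original source: taking the low
   QImode piece of a CONST_INT folds through the CONSTANT_P branch above.
   The value 0x1234 is arbitrary; kept under #if 0 so it never builds.  */
#if 0
static void
example_fold_subreg ()
{
  /* (subreg:QI (const_int 0x1234) <lowpart>) is the low byte, 0x34.  */
  if (simplify_subreg (QImode, GEN_INT (0x1234), SImode,
                       subreg_lowpart_offset (QImode, SImode))
      != GEN_INT (0x34))
    abort ();
}
#endif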
2697 /* Make a SUBREG operation or equivalent if it folds. */
2699 rtx
2700 simplify_gen_subreg (outermode, op, innermode, byte)
2701 rtx op;
2702 unsigned int byte;
2703 enum machine_mode outermode, innermode;
2705 rtx new;
2706 /* A little bit of sanity checking. */
2707 if (innermode == VOIDmode || outermode == VOIDmode
2708 || innermode == BLKmode || outermode == BLKmode)
2709 abort ();
2711 if (GET_MODE (op) != innermode
2712 && GET_MODE (op) != VOIDmode)
2713 abort ();
2715 if (byte % GET_MODE_SIZE (outermode)
2716 || byte >= GET_MODE_SIZE (innermode))
2717 abort ();
2719 if (GET_CODE (op) == QUEUED)
2720 return NULL_RTX;
2722 new = simplify_subreg (outermode, op, innermode, byte);
2723 if (new)
2724 return new;
2726 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2727 return NULL_RTX;
2729 return gen_rtx_SUBREG (outermode, op, byte);
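/* Illustrative sketch, not part of the original source: unlike
   simplify_subreg, simplify_gen_subreg falls back to constructing the
   SUBREG when nothing folds.  The pseudo register is hypothetical;
   kept under #if 0 so it never builds.  */
#if 0
static rtx
example_gen_subreg ()
{
  rtx pseudo = gen_reg_rtx (SImode);

  /* Nothing folds for a bare pseudo, so on a little-endian target this
     returns (subreg:HI (reg:SI N) 0).  */
  return simplify_gen_subreg (HImode, pseudo, SImode,
                              subreg_lowpart_offset (HImode, SImode));
}
#endif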
2731 /* Simplify X, an rtx expression.
2733 Return the simplified expression or NULL if no simplifications
2734 were possible.
2736 This is the preferred entry point into the simplification routines;
2737 however, we still allow passes to call the more specific routines.
2739 Right now GCC has three (yes, three) major bodies of RTL simplification
2740 code that need to be unified.
2742 1. fold_rtx in cse.c. This code uses various CSE specific
2743 information to aid in RTL simplification.
2745 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2746 it uses combine specific information to aid in RTL
2747 simplification.
2749 3. The routines in this file.
2752 Long term we want to have only one body of simplification code; to
2753 get to that state I recommend the following steps:
2755 1. Pore over fold_rtx & simplify_rtx and move any simplifications
2756 which do not depend on pass-specific state into these routines.
2758 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2759 use this routine whenever possible.
2761 3. Allow for pass dependent state to be provided to these
2762 routines and add simplifications based on the pass dependent
2763 state. Remove code from cse.c & combine.c that becomes
2764 redundant/dead.
2766 It will take time, but ultimately the compiler will be easier to
2767 maintain and improve. It's totally silly that when we add a
2768 simplification it needs to be added in 4 places (3 for RTL
2769 simplification and 1 for tree simplification). */
2771 rtx
2772 simplify_rtx (x)
2773 rtx x;
2775 enum rtx_code code = GET_CODE (x);
2776 enum machine_mode mode = GET_MODE (x);
2778 switch (GET_RTX_CLASS (code))
2780 case '1':
2781 return simplify_unary_operation (code, mode,
2782 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2783 case 'c':
2784 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2786 rtx tem;
2788 tem = XEXP (x, 0);
2789 XEXP (x, 0) = XEXP (x, 1);
2790 XEXP (x, 1) = tem;
2791 return simplify_binary_operation (code, mode,
2792 XEXP (x, 0), XEXP (x, 1));
2795 case '2':
2796 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2798 case '3':
2799 case 'b':
2800 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2801 XEXP (x, 0), XEXP (x, 1),
2802 XEXP (x, 2));
2804 case '<':
2805 return simplify_relational_operation (code,
2806 ((GET_MODE (XEXP (x, 0))
2807 != VOIDmode)
2808 ? GET_MODE (XEXP (x, 0))
2809 : GET_MODE (XEXP (x, 1))),
2810 XEXP (x, 0), XEXP (x, 1));
2811 case 'x':
2812 /* The only case we try to handle is a SUBREG. */
2813 if (code == SUBREG)
2814 return simplify_gen_subreg (mode, SUBREG_REG (x),
2815 GET_MODE (SUBREG_REG (x)),
2816 SUBREG_BYTE (x));
2817 return NULL;
2818 default:
2819 return NULL;
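/* Illustrative sketch, not part of the original source: the preferred
   entry point dispatches on the rtx class, so a constant PLUS folds
   outright.  Kept under #if 0 so it never builds.  */
#if 0
static void
example_simplify_rtx ()
{
  rtx sum = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));

  /* Class 'c' falls through to simplify_binary_operation, which
     folds (plus:SI 2 3) to (const_int 5).  */
  if (simplify_rtx (sum) != GEN_INT (5))
    abort ();
}
#endif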