/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"

#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"

/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in cse.c.  Until then, do not
   change these macros without also changing the copy in cse.c.  */

#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)

/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
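
/* Illustratively: FIXED_BASE_PLUS_P accepts addresses such as
   (reg frame_pointer) or (plus (reg frame_pointer) (const_int 8)),
   while NONZERO_BASE_PLUS_P additionally accepts stack forms such as
   (plus (reg stack_pointer) (const_int -16)).  Neither accepts a bare
   (reg arg_pointer) unless that register is fixed, since (see above)
   an unused argument pointer may be zero.  */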

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
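
/* For example, with 32-bit wide ints the CONST_INT -2 is represented
   by the pair (0xfffffffe, HWI_SIGN_EXTEND (0xfffffffe)); the low word
   is negative when viewed as signed, so the high word becomes -1 (all
   ones) and the pair denotes -2 in "infinite precision".  A low word
   with the sign bit clear extends with a zero high word instead.  */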

static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
						    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
					enum machine_mode, rtx, rtx));
static void check_fold_consts PARAMS ((PTR));
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
static void simplify_unary_real PARAMS ((PTR));
static void simplify_binary_real PARAMS ((PTR));
#endif
static void simplify_binary_is2orm1 PARAMS ((PTR));

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return GEN_INT (trunc_int_for_mode (- INTVAL (i), mode));
}
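
/* The truncation matters exactly at the mode boundary: negating the
   QImode value -128 yields +128, which QImode cannot represent, so
   trunc_int_for_mode wraps the result back to -128 instead of
   returning an out-of-range CONST_INT.  */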

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);

  if (tem)
    return tem;

  /* Handle addition and subtraction of CONST_INT specially.  Otherwise,
     just form the operation.  */

  if (GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode
      && (code == PLUS || code == MINUS))
    {
      if (code == MINUS)
	op1 = neg_const_int (mode, op1);
      return plus_constant (op0, INTVAL (op1));
    }
  else
    return gen_rtx_fmt_ee (code, mode, op0, op1);
}
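
/* A typical use, as a sketch: simplify_gen_binary (MINUS, SImode, x,
   GEN_INT (4)) does not build a MINUS at all; the CONST_INT special
   case above negates the constant and hands it to plus_constant,
   yielding the canonical (plus x (const_int -4)) form that the rest
   of the compiler expects for address arithmetic.  */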

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
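
/* Sketch of the effect: if X is (mem/u (symbol_ref .LC0)) and the pool
   entry .LC0 holds a DFmode CONST_DOUBLE, the result is that
   CONST_DOUBLE itself, so callers can fold through pool references as
   if the constant appeared inline; reading the entry in a different
   mode goes through simplify_subreg first.  */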

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
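
/* For instance, given (lt (const_int 0) (reg X)) with no constant
   fold available, the swap above puts the constant second and flips
   the code with swap_condition, producing the canonical
   (gt (reg X) (const_int 0)).  */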

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	return
	  simplify_gen_relational (code, mode,
				   (op_mode != VOIDmode
				    ? op_mode
				    : GET_MODE (op0) != VOIDmode
				    ? GET_MODE (op0)
				    : GET_MODE (op1)),
				   op0, op1);
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    default:
      if (GET_CODE (x) == MEM)
	return
	  replace_equiv_address_nv (x,
				    simplify_replace_rtx (XEXP (x, 0),
							  old, new));

      return x;
    }
  return x;
}
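
/* Example: replacing (reg 100) with (const_int 2) in
   (plus (reg 100) (const_int 3)) substitutes into both operands and
   then lets simplify_gen_binary fold the result to (const_int 5)
   rather than returning a PLUS of two constants.  */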

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Subroutine of simplify_unary_operation, called via do_float_handler.
   Handles simplification of unary ops on floating point values.  */
struct simplify_unary_real_args
{
  rtx operand;
  rtx result;
  enum machine_mode mode;
  enum rtx_code code;
  bool want_integer;
};
#define REAL_VALUE_ABS(d_) \
   (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))

static void
simplify_unary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE d;

  struct simplify_unary_real_args *args =
    (struct simplify_unary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);

  if (args->want_integer)
    {
      HOST_WIDE_INT i;

      switch (args->code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      args->result = GEN_INT (trunc_int_for_mode (i, args->mode));
    }
  else
    {
      switch (args->code)
	{
	case SQRT:
	  /* We don't attempt to optimize this.  */
	  args->result = 0;
	  return;

	case ABS:	      d = REAL_VALUE_ABS (d);			break;
	case NEG:	      d = REAL_VALUE_NEGATE (d);		break;
	case FLOAT_TRUNCATE:  d = real_value_truncate (args->mode, d);	break;
	case FLOAT_EXTEND:    /* All this does is change the mode.  */	break;
	case FIX:	      d = REAL_VALUE_RNDZINT (d);		break;
	case UNSIGNED_FIX:    d = REAL_VALUE_UNSIGNED_RNDZINT (d);	break;
	default:
	  abort ();
	}
      args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
    }
}
#endif
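
/* For example, folding (fix:SI (const_double:DF 2.5)) takes the
   want_integer path: REAL_VALUE_FIX rounds toward zero, giving
   GEN_INT (2), while the other path handles cases such as
   (abs:DF (const_double:DF -1.5)) -> 1.5.  */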

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else

      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = false;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
	return args.result;

      return 0;
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = true;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
	return args.result;

      return 0;
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
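
/* Two quick examples of the constant cases above: in QImode,
   (neg (const_int -128)) overflows, and trunc_int_for_mode wraps the
   result back to (const_int -128); and a QImode (sign_extend:SI
   (const_int -1)) masks to 0xff, sees the sign bit set, and folds to
   (const_int -1) with all upper bits set.  */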

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Subroutine of simplify_binary_operation, called via do_float_handler.
   Handles simplification of binary ops on floating point values.  */
struct simplify_binary_real_args
{
  rtx trueop0, trueop1;
  rtx result;
  enum rtx_code code;
  enum machine_mode mode;
};

static void
simplify_binary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE f0, f1, value;
  struct simplify_binary_real_args *args =
    (struct simplify_binary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
  REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
  f0 = real_value_truncate (args->mode, f0);
  f1 = real_value_truncate (args->mode, f1);

#ifdef REAL_ARITHMETIC
#ifndef REAL_INFINITY
  if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
    {
      args->result = 0;
      return;
    }
#endif
  REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);
#else
  switch (args->code)
    {
    case PLUS:
      value = f0 + f1;
      break;
    case MINUS:
      value = f0 - f1;
      break;
    case MULT:
      value = f0 * f1;
      break;
    case DIV:
#ifndef REAL_INFINITY
      if (f1 == 0)
	{
	  /* This is a void callback, so signal failure through the
	     result slot rather than a return value.  */
	  args->result = 0;
	  return;
	}
#endif
      value = f0 / f1;
      break;
    case SMIN:
      value = MIN (f0, f1);
      break;
    case SMAX:
      value = MAX (f0, f1);
      break;
    default:
      abort ();
    }
#endif

  value = real_value_truncate (args->mode, value);
  args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
}
#endif

/* Another subroutine called via do_float_handler.  This one tests
   the floating point value given against 2. and -1.  */
struct simplify_binary_is2orm1_args
{
  rtx value;
  bool is_2;
  bool is_m1;
};

static void
simplify_binary_is2orm1 (p)
     PTR p;
{
  REAL_VALUE_TYPE d;
  struct simplify_binary_is2orm1_args *args =
    (struct simplify_binary_is2orm1_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
  args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
  args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
}
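
/* The MULT case below uses this to rewrite x * 2.0 as x + x and
   x * -1.0 as -x: the comparison against dconst2 and dconstm1 runs
   inside do_float_handler so that a trapping read of the constant
   aborts the simplification instead of the compiler.  */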

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      struct simplify_binary_real_args args;
      args.trueop0 = trueop0;
      args.trueop1 = trueop1;
      args.mode = mode;
      args.code = code;

      if (do_float_handler (simplify_binary_real, (PTR) &args))
	return args.result;
      return 0;
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:  case ASHIFTRT:
	case ASHIFT:
	case ROTATE:    case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* In IEEE floating point, x+0 is not the same as x.  Similarly
	     for the other optimizations below.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  if (trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b))  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;

	case MINUS:
	  /* None of these optimizations can be done for IEEE
	     floating point.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  */
	  if (trueop0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect.  */
	  if (trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;

	case MULT:
	  if (trueop1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* In IEEE floating point, x*0 is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for nans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
	    {
	      struct simplify_binary_is2orm1_args args;

	      args.value = trueop1;
	      if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
		return 0;

	      /* x*2 is x+x and x*(-1) is -x  */
	      if (args.is_2 && GET_MODE (op0) == mode)
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      else if (args.is_m1 && GET_MODE (op0) == mode)
		return gen_rtx_NEG (mode, op0);
	    }
	  break;

	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return gen_rtx_NOT (mode, op0);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* In IEEE floating point, 0/x is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
#if defined (REAL_ARITHMETIC)
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
		  return
		    gen_rtx_MULT (mode, op0,
				  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
		}
	    }
#endif
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  abort ();
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to insure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
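
/* A worked instance of the X * C +- X distribution above: for
   (plus (mult x (const_int 4)) x), coeff0 becomes 4 and coeff1 stays
   1, the stripped operands compare equal, and the result is
   (mult x (const_int 5)); had_mult is set, so a real multiply is only
   produced because one was already present.  */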

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return 0;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      ops[i].op = XEXP (this_op, 0);
	      input_consts++;
	      changed = 1;
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return NULL_RTX;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (n_ops + n_consts > input_ops
      || (n_ops + n_consts == input_ops && n_consts <= input_consts))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
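
/* Tracing an example: for (minus (plus a b) (plus b c)) the expansion
   loop flattens the operands to { a, +b, -b, -c }, the pairwise pass
   cancels +b against -b (and then folds away the resulting zero), and
   the rebuild emits (minus a c) -- fewer operands than the input, so
   the result is accepted.  */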

struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
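
/* If either operand is a NaN, the early return above leaves
   args->unordered set, and the caller resolves the comparison from
   its unordered table (e.g. UNORDERED folds to const_true_rtx,
   ORDERED to const0_rtx).  */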

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For an integer comparison of A and B, we may be able to simplify
     A - B and then simplify a comparison of that with zero.  If A and B
     are both either a register or a CONST_INT, this can't help; testing
     for these cases will prevent infinite recursion here and speed
     things up.

     We can never do this optimization if CODE is an unsigned comparison,
     because it gives an incorrect result if the subtraction wraps around
     zero.  ANSI C defines unsigned operations such that they never
     overflow, so such cases cannot be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
                                          mode, tem, const0_rtx);
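
  /* For example (illustrative only): comparing (plus (reg) (const_int 1))
     against (reg) lets simplify_binary_operation fold the MINUS to
     (const_int 1), and the whole test becomes a signed comparison of 1
     against zero.  */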

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For non-IEEE floating-point, if the two operands are equal, we know
     the result.  */
  if (rtx_equal_p (trueop0, trueop1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
          || ! FLOAT_MODE_P (GET_MODE (trueop0))
          || flag_unsafe_math_optimizations))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Set up the input for check_fold_consts ().  */
      args.op0 = trueop0;
      args.op1 = trueop1;

      if (!do_float_handler (check_fold_consts, (PTR) &args))
        args.unordered = 1;

      if (args.unordered)
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      /* Receive the output from check_fold_consts ().  */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign- or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
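
      /* For instance (illustrative only): with width 32, the constants
         -1 and 1 give op0lt = 1 but op0ltu = 0, since as an unsigned
         32-bit value -1 is 0xffffffff, the largest possible.  */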
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          /* References to the frame plus a constant or labels cannot
             be zero, but a SYMBOL_REF can be zero due to #pragma weak.  */
          if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
               || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              /* On some machines, the ap reg can be 0 sometimes.  */
              && op0 != arg_pointer_rtx
#endif
              )
            return const0_rtx;
          break;

        case NE:
          if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
               || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              && op0 != arg_pointer_rtx
#endif
              )
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        default:
          break;
        }

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
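
/* A minimal usage sketch (not part of the original file); the helper
   name is hypothetical.  Comparing an SImode operand against zero with
   GEU always folds, because unsigned values are never negative.  */
#if 0
static rtx
fold_geu_example (op)
     rtx op;
{
  /* Returns const_true_rtx: (geu:SI OP (const_int 0)) always holds.  */
  return simplify_relational_operation (GEU, SImode, op, const0_rtx);
}
#endif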

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate the sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert "a != b ? a : b" and "a == b ? b : a" to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && rtx_equal_p (XEXP (op0, 0), op1)
          && rtx_equal_p (XEXP (op0, 1), op2))
        return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
               && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
               && rtx_equal_p (XEXP (op0, 1), op1)
               && rtx_equal_p (XEXP (op0, 0), op2))
        return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;
          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const1_rtx)
            return op1;
          else if (temp)
            op0 = temp;

          /* Look for happy constants in op1 and op2, i.e. a
             STORE_FLAG_VALUE / zero pair, for which the IF_THEN_ELSE
             collapses into the comparison itself (possibly reversed).  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}
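
/* A minimal usage sketch (not part of the original file); the helper
   name is hypothetical.  When the IF_THEN_ELSE condition is already a
   constant, the result folds to one arm.  */
#if 0
static rtx
fold_ite_example (op1, op2)
     rtx op1, op2;
{
  /* const1_rtx is nonzero, so this returns OP1.  */
  return simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
                                     const1_rtx, op1, op2);
}
#endif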

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* A little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  /* Attempt to simplify a constant to a non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      /* ??? This code is partly redundant with code below, but can handle
         the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
         GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
         using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte)
        {
          rtx new = gen_lowpart_if_possible (outermode, op);
          if (new)
            return new;
        }

      /* The comment above applies here as well.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
          && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
          && GET_MODE_CLASS (outermode) == MODE_INT)
        {
          rtx new = constant_subword (op,
                                      (byte / UNITS_PER_WORD),
                                      innermode);
          if (new)
            return new;
        }

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
        {
        case CONST_DOUBLE:
          if (GET_MODE (op) != VOIDmode)
            break;

          /* We can't handle this case yet.  */
          if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
            return NULL_RTX;

          part = offset >= HOST_BITS_PER_WIDE_INT;
          if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
               && BYTES_BIG_ENDIAN)
              || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
                  && WORDS_BIG_ENDIAN))
            part = !part;
          val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
          offset %= HOST_BITS_PER_WIDE_INT;

          /* We've already picked the word we want from a double, so
             pretend this is actually an integer.  */
          innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

          /* FALLTHROUGH */
        case CONST_INT:
          if (GET_CODE (op) == CONST_INT)
            val = INTVAL (op);

          /* We don't handle synthesizing of non-integral constants yet.  */
          if (GET_MODE_CLASS (outermode) != MODE_INT)
            return NULL_RTX;

          if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
            {
              if (WORDS_BIG_ENDIAN)
                offset = (GET_MODE_BITSIZE (innermode)
                          - GET_MODE_BITSIZE (outermode) - offset);
              if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
                  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
                offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
                          - 2 * (offset % BITS_PER_WORD));
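
              /* For instance (illustrative only): with a 64-bit inner
                 mode, a QImode outer mode and byte 0 on a word-big-endian
                 target, the bit offset becomes 64 - 8 - 0 = 56; the byte
                 at the lowest address holds the most significant bits.  */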
            }

          if (offset >= HOST_BITS_PER_WIDE_INT)
            return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
          else
            {
              val >>= offset;
              if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
                val = trunc_int_for_mode (val, outermode);
              return GEN_INT (val);
            }
        default:
          break;
        }
    }

  /* Changing the mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  An irritating exception is a paradoxical subreg, where
         we define SUBREG_BYTE to be 0; on big-endian machines this value
         should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
            && (TEST_HARD_REG_BIT
                (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
                 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
                                           0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG (outermode, final_regno);

          /* Propagate the original regno.  We don't have any way to
             specify an offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as a CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the
         relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
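
/* A minimal usage sketch (not part of the original file); the helper
   name is hypothetical.  A constant subreg folds to a plain constant.  */
#if 0
static rtx
fold_subreg_example ()
{
  /* (subreg:QI (const_int 0x1234) 0) folds to (const_int 0x34) when
     neither BYTES_BIG_ENDIAN nor WORDS_BIG_ENDIAN is set.  */
  return simplify_subreg (QImode, GEN_INT (0x1234), SImode, 0);
}
#endif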

/* Make a SUBREG operation or an equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;

  /* A little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
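
/* Usage note (not part of the original file): unlike simplify_subreg,
   this entry point falls back to building a real SUBREG, returning
   NULL_RTX only when that is not valid either: for a QUEUED rtx, for a
   SUBREG operand, or for an operand with VOIDmode.  */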

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass-dependent state to be provided to these
	   routines and add simplifications based on the pass-dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        {
          rtx tem;

          tem = XEXP (x, 0);
          XEXP (x, 0) = XEXP (x, 1);
          XEXP (x, 1) = tem;
          return simplify_binary_operation (code, mode,
                                            XEXP (x, 0), XEXP (x, 1));
        }
      /* FALLTHRU */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      return NULL;

    default:
      return NULL;
    }
}
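
/* A minimal usage sketch (not part of the original file); the helper
   name is hypothetical.  Constant folding through the generic entry
   point above.  */
#if 0
static rtx
fold_plus_example ()
{
  /* (plus:SI (const_int 2) (const_int 3)) simplifies to (const_int 5)
     via the 'c'/'2' cases above.  */
  rtx x = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
  return simplify_rtx (x);
}
#endif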