/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"

#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"

/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with cse.c.
   Until then, do not change these macros without also changing the
   copy kept in cse.c.  */
#define FIXED_BASE_PLUS_P(X) \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]) \
   || (X) == virtual_stack_vars_rtx \
   || (X) == virtual_incoming_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx \
           || XEXP (X, 0) == hard_frame_pointer_rtx \
           || (XEXP (X, 0) == arg_pointer_rtx \
               && fixed_regs[ARG_POINTER_REGNUM]) \
           || XEXP (X, 0) == virtual_stack_vars_rtx \
           || XEXP (X, 0) == virtual_incoming_args_rtx)) \
   || GET_CODE (X) == ADDRESSOF)

/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P; however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X) \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
   || (X) == virtual_stack_vars_rtx \
   || (X) == virtual_incoming_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx \
           || XEXP (X, 0) == hard_frame_pointer_rtx \
           || (XEXP (X, 0) == arg_pointer_rtx \
               && fixed_regs[ARG_POINTER_REGNUM]) \
           || XEXP (X, 0) == virtual_stack_vars_rtx \
           || XEXP (X, 0) == virtual_incoming_args_rtx)) \
   || (X) == stack_pointer_rtx \
   || (X) == virtual_stack_dynamic_rtx \
   || (X) == virtual_outgoing_args_rtx \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx \
           || XEXP (X, 0) == virtual_stack_dynamic_rtx \
           || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
   || GET_CODE (X) == ADDRESSOF)

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
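/* For instance (an illustrative example, not from the original sources):
   on a host whose HOST_WIDE_INT is 32 bits, the 64-bit value -5 is held
   as low = 0xfffffffb with high = -1; the macro below reproduces that
   high half from the low one, since 0xfffffffb is negative when read
   as a signed wide int, while HWI_SIGN_EXTEND (5) yields 0.  */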
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))

static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
                                                    const void *));
static rtx simplify_plus_minus PARAMS ((enum rtx_code,
                                        enum machine_mode, rtx,
                                        rtx, int));
static void check_fold_consts PARAMS ((PTR));
static void simplify_unary_real PARAMS ((PTR));
static void simplify_binary_real PARAMS ((PTR));
static void simplify_binary_is2orm1 PARAMS ((PTR));

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
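/* A worked example (illustrative): in QImode, negating (const_int -128)
   computes 128, which gen_int_mode truncates back to -128 -- precisely
   the overflow case being guarded against.  */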
static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */
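/* Example usage (illustrative, not part of the original file):

     simplify_gen_binary (PLUS, SImode, x, const0_rtx)

   folds to X itself via simplify_binary_operation, while a pair that
   does not fold, such as two distinct pseudo registers, simply yields
   the formed rtx (plus:SI x y).  */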
rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
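/* For instance (illustrative): a MEM:DF whose address is a SYMBOL_REF
   into the constant pool holding 2.0 comes back as the underlying
   CONST_DOUBLE, so the simplifiers below can fold with the actual
   value instead of the memory reference.  */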
rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */
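/* Example usage (illustrative): substituting OLD = (reg:SI 60) with
   NEW = (const_int 7) in (plus:SI (reg:SI 60) (const_int 1)) recurses
   through the '2' case below and folds to (const_int 8).  */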
rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

        return
          simplify_gen_relational (code, mode,
                                   (op_mode != VOIDmode
                                    ? op_mode
                                    : GET_MODE (op0) != VOIDmode
                                    ? GET_MODE (op0)
                                    : GET_MODE (op1)),
                                   op0, op1);
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    default:
      if (GET_CODE (x) == MEM)
        return
          replace_equiv_address_nv (x,
                                    simplify_replace_rtx (XEXP (x, 0),
                                                          old, new));

      return x;
    }
  return x;
}

/* Subroutine of simplify_unary_operation, called via do_float_handler.
   Handles simplification of unary ops on floating point values.  */
struct simplify_unary_real_args
{
  rtx operand;
  rtx result;
  enum machine_mode mode;
  enum rtx_code code;
  bool want_integer;
};

#define REAL_VALUE_ABS(d_) \
   (REAL_VALUE_NEGATIVE (d_) ? REAL_VALUE_NEGATE (d_) : (d_))

static void
simplify_unary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE d;

  struct simplify_unary_real_args *args =
    (struct simplify_unary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->operand);

  if (args->want_integer)
    {
      HOST_WIDE_INT i;

      switch (args->code)
        {
        case FIX:          i = REAL_VALUE_FIX (d);           break;
        case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d);  break;
        default:
          abort ();
        }
      args->result = gen_int_mode (i, args->mode);
    }
  else
    {
      switch (args->code)
        {
        case SQRT:
          /* We don't attempt to optimize this.  */
          args->result = 0;
          return;

        case ABS:            d = REAL_VALUE_ABS (d);                     break;
        case NEG:            d = REAL_VALUE_NEGATE (d);                  break;
        case FLOAT_TRUNCATE: d = real_value_truncate (args->mode, d);    break;
        case FLOAT_EXTEND:   /* All this does is change the mode.  */    break;
        case FIX:            d = REAL_VALUE_RNDZINT (d);                 break;
        case UNSIGNED_FIX:   d = REAL_VALUE_UNSIGNED_RNDZINT (d);        break;
        default:
          abort ();
        }
      args->result = CONST_DOUBLE_FROM_REAL_VALUE (d, args->mode);
    }
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
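          /* A worked example (illustrative): arg0 = 12 (binary 1100) has
             arg0 & -arg0 = 4, and exact_log2 (4) + 1 = 3, the 1-based
             position of the lowest set bit; exact_log2 (0) is -1, giving
             the desired 0 for arg0 = 0.  */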
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
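          /* E.g. (illustrative): (const_int -1) zero-extended from QImode
             must yield 255; without OP_MODE that is unknowable, since a
             CONST_INT carries no mode of its own.  */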
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
          else
            lv = exact_log2 (l1 & (-l1)) + 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = false;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
        return args.result;

      return 0;
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      struct simplify_unary_real_args args;
      args.operand = trueop;
      args.mode = mode;
      args.code = code;
      args.want_integer = true;

      if (do_float_handler (simplify_unary_real, (PTR) &args))
        return args.result;

      return 0;
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return gen_rtx_fmt_ee (reversed,
                                   op_mode, XEXP (op, 0), XEXP (op, 1));
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
        case ZERO_EXTEND:
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
          break;
#endif

        default:
          break;
        }

      return 0;
    }
}

/* Subroutine of simplify_binary_operation, called via do_float_handler.
   Handles simplification of binary ops on floating point values.  */
struct simplify_binary_real_args
{
  rtx trueop0, trueop1;
  rtx result;
  enum rtx_code code;
  enum machine_mode mode;
};

static void
simplify_binary_real (p)
     PTR p;
{
  REAL_VALUE_TYPE f0, f1, value;
  struct simplify_binary_real_args *args =
    (struct simplify_binary_real_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (f0, args->trueop0);
  REAL_VALUE_FROM_CONST_DOUBLE (f1, args->trueop1);
  f0 = real_value_truncate (args->mode, f0);
  f1 = real_value_truncate (args->mode, f1);

#ifndef REAL_INFINITY
  if (args->code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
    {
      args->result = 0;
      return;
    }
#endif
  REAL_ARITHMETIC (value, rtx_to_tree_code (args->code), f0, f1);

  value = real_value_truncate (args->mode, value);
  args->result = CONST_DOUBLE_FROM_REAL_VALUE (value, args->mode);
}

/* Another subroutine called via do_float_handler.  This one tests
   the floating point value given against 2. and -1.  */
struct simplify_binary_is2orm1_args
{
  rtx value;
  bool is_2;
  bool is_m1;
};

static void
simplify_binary_is2orm1 (p)
     PTR p;
{
  REAL_VALUE_TYPE d;
  struct simplify_binary_is2orm1_args *args =
    (struct simplify_binary_is2orm1_args *) p;

  REAL_VALUE_FROM_CONST_DOUBLE (d, args->value);
  args->is_2 = REAL_VALUES_EQUAL (d, dconst2);
  args->is_m1 = REAL_VALUES_EQUAL (d, dconstm1);
}

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      struct simplify_binary_real_args args;
      args.trueop0 = trueop0;
      args.trueop1 = trueop1;
      args.mode = mode;
      args.code = code;

      if (do_float_handler (simplify_binary_real, (PTR) &args))
        return args.result;
      return 0;
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* ... fall through ...  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:  case ASHIFTRT:
        case ASHIFT:
        case ROTATE:  case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and non-zero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return gen_rtx_NEG (mode, XEXP (op0, 0));

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */
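          /* Concretely (illustrative): (plus (mult x 3) x) distributes to
             x * 4 (possibly then emitted as a shift), while
             (plus (ashift x 2) x) is left alone, since no real multiply
             was present to begin with.  */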
          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO when we are using
             cc0; without cc0 we want to leave it as a COMPARE so we can
             distinguish it from a register-register copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and non-zero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return gen_rtx_NEG (mode, op1);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return gen_rtx_NOT (mode, op1);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             a real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 1)));
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                return simplify_gen_binary (AND, mode, op0,
                                            gen_rtx_NOT (mode, XEXP (op1, 0)));
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            {
              tem = simplify_unary_operation (NEG, mode, op0, mode);

              return tem ? tem : gen_rtx_NEG (mode, op0);
            }

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for nans.
             However, ANSI says we can drop signals,
             so we can do this anyway.  */
          if (trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
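          /* E.g. (illustrative): once initial RTL generation is done,
             (mult x 8) becomes (ashift x 3).  */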
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT)
            {
              struct simplify_binary_is2orm1_args args;

              args.value = trueop1;
              if (! do_float_handler (simplify_binary_is2orm1, (PTR) &args))
                return 0;

              /* x*2 is x+x and x*(-1) is -x */
              if (args.is_2 && GET_MODE (op0) == mode)
                return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

              else if (args.is_m1 && GET_MODE (op0) == mode)
                return gen_rtx_NEG (mode, op0);
            }

          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return gen_rtx_NOT (mode, op0);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

          /* ... fall through ...  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  return gen_rtx_MULT (mode, op0,
                                       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

          /* ... fall through ...  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* ... fall through ...  */

        case ASHIFT:
        case ASHIFTRT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          abort ();
        }

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
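/* A worked example (illustrative, not from the original sources):
   (minus (plus x 5) (plus x 2)) expands to the operand list
   x, +5, -x, -2; the x terms cancel and the constants combine,
   so the whole expression reduces to (const_int 3).  */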
struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (p1, p2)
     const void *p1;
     const void *p2;
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (code, mode, op0, op1, force)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
     int force;
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}

struct cfc_args
{
  rtx op0, op1;                 /* Input */
  int equal, op0lt, op1lt;      /* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered; only a NaN operand
     makes the comparison unordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
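/* Example (illustrative): simplify_relational_operation (GT, SImode,
   GEN_INT (2), GEN_INT (1)) takes the both-integers path below and
   yields const_true_rtx, while comparing two unrelated pseudo
   registers yields zero -- no simplification.  */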
rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */
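  /* E.g. (illustrative): for (gt (plus x 3) x), the difference
     (plus x 3) - x folds to 3, so the whole comparison becomes
     (gt 3 0) -- valid only for the signed codes handled here.  */
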
2013 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2014 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2015 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2016 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2017 && code != GTU && code != GEU && code != LTU && code != LEU)
2018 return simplify_relational_operation (signed_condition (code),
2019 mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result.  */
  if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Set up the input for check_fold_consts ().  */
      args.op0 = trueop0;
      args.op1 = trueop1;

      if (!do_float_handler (check_fold_consts, (PTR) &args))
        args.unordered = 1;

      if (args.unordered)
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      /* Receive the output from check_fold_consts ().  */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign- or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
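
      /* A worked example (illustrative): comparing (const_int 200) with
         (const_int 100) in an 8-bit mode leaves l0u = 200 and l1u = 100
         after masking, but sign extension makes l0s = -56; hence op0ltu
         is false while op0lt is true, exactly as LTU and LT require for
         those constants.  */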
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          /* References to the frame plus a constant or labels cannot
             be zero, but a SYMBOL_REF can due to #pragma weak.  */
          if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
               || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              /* On some machines, the arg pointer can sometimes be 0.  */
              && op0 != arg_pointer_rtx
#endif
              )
            return const0_rtx;
          break;

        case NE:
          if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
               || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
              && op0 != arg_pointer_rtx
#endif
              )
            return const_true_rtx;
          break;

        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;

        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;

        default:
          break;
        }

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
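
/* A usage sketch (illustrative, not part of the compiler proper): a
   caller folding a comparison of two SImode constants might write

	rtx res = simplify_relational_operation (LTU, SImode,
						 GEN_INT (1), GEN_INT (2));

   and receive const_true_rtx from the integer case above; when nothing
   can be decided, the routine returns 0 and the caller keeps the
   original comparison.  */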

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate the sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
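
      /* A worked example of the extraction above (illustrative,
         little-endian bit numbering): (zero_extract:SI (const_int 165)
         (const_int 4) (const_int 4)) shifts 0xA5 right by 4 and masks
         to 4 bits, giving (const_int 10); a sign_extract of the same
         field propagates the top bit and gives (const_int -6).  */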

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;

      /* Convert a != b ? a : b, and likewise a == b ? b : a, to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && rtx_equal_p (XEXP (op0, 0), op1)
          && rtx_equal_p (XEXP (op0, 1), op2))
        return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
               && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
               && rtx_equal_p (XEXP (op0, 1), op1)
               && rtx_equal_p (XEXP (op0, 0), op2))
        return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;
          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
                                                XEXP (op0, 0), XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const1_rtx)
            return op1;
          else if (temp)
            op0 = temp;

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}
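
/* Illustrative examples of the IF_THEN_ELSE folding above (not from the
   original sources): a constant condition selects an arm outright,

	(if_then_else (const_int 1) (reg A) (reg B)) -> (reg A)

   and, when STORE_FLAG_VALUE is 1, the store-flag case rewrites

	(if_then_else (lt X Y) (const_int 1) (const_int 0)) -> (lt X Y).  */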

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE).
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  /* Attempt to simplify a constant to a non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      /* ??? This code is partly redundant with code below, but can handle
         the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
         GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
         using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte)
        {
          rtx new = gen_lowpart_if_possible (outermode, op);
          if (new)
            return new;
        }

      /* A similar comment to the one above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
          && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
          && GET_MODE_CLASS (outermode) == MODE_INT)
        {
          rtx new = constant_subword (op,
                                      (byte / UNITS_PER_WORD),
                                      innermode);
          if (new)
            return new;
        }

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
        {
        case CONST_DOUBLE:
          if (GET_MODE (op) != VOIDmode)
            break;

          /* We can't handle this case yet.  */
          if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
            return NULL_RTX;

          part = offset >= HOST_BITS_PER_WIDE_INT;
          if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
               && BYTES_BIG_ENDIAN)
              || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
                  && WORDS_BIG_ENDIAN))
            part = !part;
          val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
          offset %= HOST_BITS_PER_WIDE_INT;

          /* We've already picked the word we want from a double, so
             pretend this is actually an integer.  */
          innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

          /* FALLTHROUGH */
        case CONST_INT:
          if (GET_CODE (op) == CONST_INT)
            val = INTVAL (op);

          /* We don't handle synthesizing of non-integral constants yet.  */
          if (GET_MODE_CLASS (outermode) != MODE_INT)
            return NULL_RTX;

          if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
            {
              if (WORDS_BIG_ENDIAN)
                offset = (GET_MODE_BITSIZE (innermode)
                          - GET_MODE_BITSIZE (outermode) - offset);
              if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
                  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
                offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
                          - 2 * (offset % BITS_PER_WORD));
            }

          if (offset >= HOST_BITS_PER_WIDE_INT)
            return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
          else
            {
              val >>= offset;
              if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
                val = trunc_int_for_mode (val, outermode);
              return GEN_INT (val);
            }

        default:
          break;
        }
    }
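
  /* A worked example (illustrative): on a little-endian target,
     simplify_subreg (QImode, GEN_INT (0x1234), HImode, 0) folds to
     (const_int 0x34), while byte 1 selects the upper byte through the
     offset = 8 shift in the CONST_INT case above, giving
     (const_int 0x12).  */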

  /* Changing the mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  The irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0; on big endian machines this
         value should really be negative.  For a moment, undo this
         exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case the resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
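
  /* Illustrative: (subreg:QI (subreg:HI (reg:SI R) 0) 0) collapses via
     the recursion above to (subreg:QI (reg:SI R) 0), changing the mode
     once rather than twice.  */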

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
            && (TEST_HARD_REG_BIT
                (reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
                 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
                                           0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG (outermode, final_regno);

          /* Propagate the original regno.  We don't have any way to
             specify the offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as a CONCAT
     of a real and an imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the
         relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
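
/* A usage sketch (illustrative, not part of the compiler proper):
   extracting the low word of a DImode value OP could be attempted as

	rtx low = simplify_subreg (SImode, op, DImode,
				   subreg_lowpart_offset (SImode, DImode));

   where a NULL result means no simpler form was found and the caller
   must build an explicit SUBREG, e.g. with simplify_gen_subreg below.  */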

/* Make a SUBREG operation or an equivalent expression if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
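
/* Note (illustrative): unlike simplify_subreg, the routine above yields
   an rtx whenever one can be built; for a pseudo register it falls back
   to gen_rtx_SUBREG, and only QUEUED operands, nested SUBREGs, and
   VOIDmode operands make it return NULL_RTX.  */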

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        {
          rtx tem;

          tem = XEXP (x, 0);
          XEXP (x, 0) = XEXP (x, 1);
          XEXP (x, 1) = tem;
          return simplify_binary_operation (code, mode,
                                            XEXP (x, 0), XEXP (x, 1));
        }
      /* FALLTHRU */

    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      return NULL;

    default:
      return NULL;
    }
}
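
/* A usage sketch (illustrative, not part of the compiler proper): a
   pass holding a freshly built rtx X can try

	rtx tem = simplify_rtx (x);
	if (tem)
	  x = tem;

   keeping X unchanged whenever none of the class-specific routines
   above finds a simpler form.  */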