/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include <setjmp.h>

#include "rtl.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in cse.c.  Until then, do not
   change these macros without also changing the copy in cse.c.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P; however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
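
/* For example, HWI_SIGN_EXTEND (-5) is (HOST_WIDE_INT) -1 while
   HWI_SIGN_EXTEND (5) is 0, so (lv, HWI_SIGN_EXTEND (lv)) forms the
   (low, high) pair for the sign-extended value of a single word LV,
   as done in several places below.  */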
static rtx simplify_plus_minus		PARAMS ((enum rtx_code,
						 enum machine_mode, rtx, rtx));
static void check_fold_consts		PARAMS ((PTR));

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;
  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
	  || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
	  || (GET_CODE (op0) == SUBREG
	      && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction of CONST_INT specially.  Otherwise,
     just form the operation.  */

  if (code == PLUS && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, INTVAL (op1));
  else if (code == MINUS && GET_CODE (op1) == CONST_INT
	   && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, - INTVAL (op1));
  else
    return gen_rtx_fmt_ee (code, mode, op0, op1);
}
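
/* As an illustration: simplify_gen_binary (PLUS, SImode, x, const0_rtx),
   with X say a pseudo register, folds to X inside
   simplify_binary_operation, while simplify_gen_binary (PLUS, SImode,
   x, GEN_INT (4)) does not fold and instead reaches plus_constant,
   yielding (plus:SI x (const_int 4)).  */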
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* Put complex operands first and constants second.  */
  if ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
      || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
	  && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
      || (GET_CODE (op0) == SUBREG
	  && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
	  && GET_RTX_CLASS (GET_CODE (op1)) != 'o'))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      return
	simplify_gen_relational (code, mode,
				 (GET_MODE (XEXP (x, 0)) != VOIDmode
				  ? GET_MODE (XEXP (x, 0))
				  : GET_MODE (XEXP (x, 1))),
				 simplify_replace_rtx (XEXP (x, 0), old, new),
				 simplify_replace_rtx (XEXP (x, 1), old, new));

    case '3':
    case 'b':
      return
	simplify_gen_ternary (code, mode, GET_MODE (XEXP (x, 0)),
			      simplify_replace_rtx (XEXP (x, 0), old, new),
			      simplify_replace_rtx (XEXP (x, 1), old, new),
			      simplify_replace_rtx (XEXP (x, 2), old, new));

    case 'x':
      /* The only case we try to handle is a lowpart SUBREG of a single-word
	 CONST_INT.  */
      if (code == SUBREG && subreg_lowpart_p (x) && old == SUBREG_REG (x)
	  && GET_CODE (new) == CONST_INT
	  && GET_MODE_SIZE (GET_MODE (old)) <= UNITS_PER_WORD)
	return GEN_INT (INTVAL (new) & GET_MODE_MASK (mode));

      return x;

    default:
      if (GET_CODE (x) == MEM)
	{
	  /* We can't use change_address here, since it verifies the memory
	     address for correctness.  We don't want such a check, since we
	     may handle addresses that were previously incorrect (such as
	     those in push instructions), and it is the caller's job to
	     verify that the resulting insn matches.  */
	  rtx addr = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx mem;

	  if (XEXP (x, 0) != addr)
	    {
	      mem = gen_rtx_MEM (GET_MODE (x), addr);
	      MEM_COPY_ATTRIBUTES (mem, x);
	    }
	  else
	    mem = x;

	  return mem;
	}

      return x;
    }
  return x;
}
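
/* For instance, on a little-endian target, replacing (reg:SI 100) with
   (const_int 0x1234) in (subreg:QI (reg:SI 100) 0) hits the 'x' case
   above; the lowpart SUBREG of the new CONST_INT folds directly to
   (const_int 0x34), i.e. 0x1234 masked by GET_MODE_MASK (QImode).  */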
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */
#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */

      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else
      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */

      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (op);
      register HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get the low-order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
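
  /* As an example of the block above, (sign_extend:SI (const_int 255))
     with OP_MODE QImode masks ARG0 to 0xff, sees the QImode sign bit
     set, subtracts 0x100 and returns (const_int -1), whereas
     (zero_extend:SI (const_int 255)) returns (const_int 255).  */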
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
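
  /* For example, on a host with 32-bit HOST_WIDE_INT,
     (not:DI (const_int 0)) is handled here: l1 = h1 = 0 gives
     lv = hv = ~0, and immed_double_const canonicalizes that all-ones
     pair back into the single (const_int -1).  */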
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      rtx x;

      if (setjmp (handler))
	/* There used to be a warning here, but that is inadvisable.
	   People may want to cause traps, and the natural way
	   to do it should not get a warning.  */
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;

	case ABS:
	  if (REAL_VALUE_NEGATIVE (d))
	    d = REAL_VALUE_NEGATE (d);
	  break;

	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;

	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;

	case FIX:
	  d = REAL_VALUE_RNDZINT (d);
	  break;

	case UNSIGNED_FIX:
	  d = REAL_VALUE_UNSIGNED_RNDZINT (d);
	  break;

	case SQRT:
	  return 0;

	default:
	  abort ();
	}

      x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
      set_float_handler (NULL);
      return x;
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      HOST_WIDE_INT val;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case FIX:
	  val = REAL_VALUE_FIX (d);
	  break;

	case UNSIGNED_FIX:
	  val = REAL_VALUE_UNSIGNED_FIX (d);
	  break;

	default:
	  abort ();
	}

      set_float_handler (NULL);

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#ifdef POINTERS_EXTEND_UNSIGNED
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#ifdef POINTERS_EXTEND_UNSIGNED
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;
      jmp_buf handler;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

#ifdef REAL_ARITHMETIC
#ifndef REAL_INFINITY
      if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
	return 0;
#endif
      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
#else
      switch (code)
	{
	case PLUS:
	  value = f0 + f1;
	  break;
	case MINUS:
	  value = f0 - f1;
	  break;
	case MULT:
	  value = f0 * f1;
	  break;
	case DIV:
#ifndef REAL_INFINITY
	  if (f1 == 0)
	    return 0;
#endif
	  value = f0 / f1;
	  break;
	case SMIN:
	  value = MIN (f0, f1);
	  break;
	case SMAX:
	  value = MAX (f0, f1);
	  break;
	default:
	  abort ();
	}
#endif

      value = real_value_truncate (mode, value);
      set_float_handler (NULL);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* ... fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:  case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:  case ASHIFTRT:
	case ASHIFT:
	case ROTATE:  case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* In IEEE floating point, x+0 is not the same as x.  Similarly
	     for the other optimizations below.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;
	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a  */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == 1)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));
	  /* See if this is something like X * C + X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
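
	  /* For example, (plus (mult x 4) x) becomes (mult x 5) here,
	     as does (plus (ashift x 2) x), the shift being read as a
	     multiplication by 4.  The HAD_MULT test makes us return 0
	     rather than hand back a MULT the input didn't contain.  */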
	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }

	  break;
	case MINUS:
	  /* None of these optimizations can be done for IEEE
	     floating point.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_unsafe_math_optimizations)
	    break;

	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */

	  if (rtx_equal_p (op0, op1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  */
	  if (op0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (op0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect.  */
	  if (op1 == CONST0_RTX (mode))
	    return op0;
	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
	  /* (a - (-b)) -> (a + b).  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return plus_constant (op0, - INTVAL (op1));

	  /* (x - (x & y)) -> (x & ~y)  */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;
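
	  /* For instance, (minus x (and x (const_int 12))) matches the
	     (x - (x & y)) pattern just above and is rewritten as
	     (and x (not (const_int 12))), since the bits removed by the
	     subtraction are exactly those selected by the mask.  */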
	case MULT:
	  if (op1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* In IEEE floating point, x*0 is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for nans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (val = exact_log2 (INTVAL (op1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
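
	  /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)),
	     since exact_log2 (8) is 3; a multiplier such as 10 fails
	     exact_log2 and is left alone.  */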
	  if (GET_CODE (op1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE d;
	      jmp_buf handler;
	      int op1is2, op1ism1;

	      if (setjmp (handler))
		return 0;

	      set_float_handler (handler);
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
	      op1is2 = REAL_VALUES_EQUAL (d, dconst2);
	      op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
	      set_float_handler (NULL);

	      /* x*2 is x+x and x*(-1) is -x.  */
	      if (op1is2 && GET_MODE (op0) == mode)
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      else if (op1ism1 && GET_MODE (op0) == mode)
		return gen_rtx_NEG (mode, op0);
	    }
	  break;
	case IOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op1;
	  if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return gen_rtx_NOT (mode, op0);
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op0;
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (op1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* In IEEE floating point, 0/x is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && op0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (op1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
		   && op1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
#if defined (REAL_ARITHMETIC)
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
		  return
		    gen_rtx_MULT (mode, op0,
				  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
		}
	    }
#endif
	  break;
	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && exact_log2 (INTVAL (op1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
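
	  /* E.g. (umod x (const_int 16)) becomes (and x (const_int 15)),
	     since an unsigned modulus by 2**4 keeps just the low four
	     bits.  */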
	  /* ... fall through ...  */

	case MOD:
	  if ((op0 == const0_rtx || op1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (op1 == const0_rtx)
	    return op0;
	  if (op0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;
	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (op1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (op1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	default:
	  abort ();
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (op0);
  arg1 = INTVAL (op1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If the shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;
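
      /* As an example of the rotate arithmetic above: for the QImode
	 value 0xb4 and a ROTATE count of 2, width is 8 and val becomes
	 (0xb4 << 2) | (0xb4 >> 6), i.e. 0x2d2, which trunc_int_for_mode
	 below truncates to the 8-bit pattern 0xd2.  */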
    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;
  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);

  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	switch (GET_CODE (ops[i]))
	  {
	  case PLUS:
	  case MINUS:
	    if (n_ops == 7)
	      return 0;

	    ops[n_ops] = XEXP (ops[i], 1);
	    negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
	    ops[i] = XEXP (ops[i], 0);
	    input_ops++;
	    changed = 1;
	    break;

	  case NEG:
	    ops[i] = XEXP (ops[i], 0);
	    negs[i] = ! negs[i];
	    changed = 1;
	    break;

	  case CONST:
	    ops[i] = XEXP (ops[i], 0);
	    input_consts++;
	    changed = 1;
	    break;

	  case NOT:
	    /* ~a -> (-a - 1) */
	    if (n_ops != 7)
	      {
		ops[n_ops] = constm1_rtx;
		negs[n_ops++] = negs[i];
		ops[i] = XEXP (ops[i], 0);
		negs[i] = ! negs[i];
		changed = 1;
	      }
	    break;

	  case CONST_INT:
	    if (negs[i])
	      ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
	    break;

	  default:
	    break;
	  }
    }

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;
  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  if (ops[i] != 0 && ops[j] != 0
	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
	    {
	      rtx lhs = ops[i], rhs = ops[j];
	      enum rtx_code ncode = PLUS;

	      if (negs[i] && ! negs[j])
		lhs = ops[j], rhs = ops[i], ncode = MINUS;
	      else if (! negs[i] && negs[j])
		ncode = MINUS;

	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
	      if (tem)
		{
		  ops[i] = tem, ops[j] = 0;
		  negs[i] = negs[i] && negs[j];
		  if (GET_CODE (tem) == NEG)
		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;
		  changed = 1;
		}
	    }

      first = 0;
    }
  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
	ops[i] = ops[j], negs[i++] = negs[j];
	if (GET_CODE (ops[j]) == CONST)
	  n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;

  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i], ops[i] = tem;
	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
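
/* Worked example: for CODE == PLUS, OP0 = (plus a (const_int 3)) and
   OP1 = (const_int -3), with A a non-constant operand, the expansion
   loop produces the list { a, (const_int -3), (const_int 3) }; the
   pairwise pass folds the two constants to zero and then a+0 back to
   A, so the whole expression collapses to just A.  */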
struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();
  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (op0))
	  || flag_unsafe_math_optimizations))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Set up the input for check_fold_consts ().  */
      args.op0 = op0;
      args.op1 = op1;

      if (!do_float_handler (check_fold_consts, (PTR) &args))
	args.unordered = 1;

      if (args.unordered)
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive the output from check_fold_consts ().  */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (op0);
	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
	}
      else
	{
	  l0u = l0s = INTVAL (op0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (op1);
	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
	}
      else
	{
	  l1u = l1s = INTVAL (op1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
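
  /* For example, comparing (const_int -1) against (const_int 1) in
     SImode sets OP0LT (signed -1 < 1) but also OP1LTU (unsigned
     0xffffffff > 1), so LT folds to const_true_rtx below while LTU
     folds to const0_rtx.  */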
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate the sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
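
      /* E.g. (zero_extract:SI (const_int 0x76543210) (const_int 8)
	 (const_int 4)), with BITS_BIG_ENDIAN zero, shifts the constant
	 right by 4 and masks to 8 bits, giving (const_int 0x21);
	 SIGN_EXTRACT would additionally propagate bit 7 of the field.  */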
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	       && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	       && rtx_equal_p (XEXP (op0, 1), op1)
	       && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case '2':
    case 'c':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    default:
      return NULL;
    }
}