gcc/simplify-rtx.c
1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
23 #include "config.h"
24 #include "system.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "flags.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "recog.h"
34 #include "function.h"
35 #include "expr.h"
36 #include "toplev.h"
37 #include "output.h"
38 #include "ggc.h"
40 /* Simplification and canonicalization of RTL. */
42 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
43 virtual regs here because the simplify_*_operation routines are called
44 by integrate.c, which is called before virtual register instantiation.
46 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
47 a header file so that their definitions can be shared with
48 cse.c, which keeps identical copies. Until then, do not
49 change these macros without also changing the copies in cse.c. */
51 #define FIXED_BASE_PLUS_P(X) \
52 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
53 || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
54 || (X) == virtual_stack_vars_rtx \
55 || (X) == virtual_incoming_args_rtx \
56 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
57 && (XEXP (X, 0) == frame_pointer_rtx \
58 || XEXP (X, 0) == hard_frame_pointer_rtx \
59 || (XEXP (X, 0) == arg_pointer_rtx \
60 && fixed_regs[ARG_POINTER_REGNUM]) \
61 || XEXP (X, 0) == virtual_stack_vars_rtx \
62 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
63 || GET_CODE (X) == ADDRESSOF)
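/* For example, (plus frame_pointer_rtx (const_int 8)), a frame slot
   address, satisfies FIXED_BASE_PLUS_P; a plain pseudo register does
   not.  */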
65 /* Similar, but also allows reference to the stack pointer.
67 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
68 arg_pointer_rtx by itself is nonzero, because on at least one machine,
69 the i960, the arg pointer is zero when it is unused. */
71 #define NONZERO_BASE_PLUS_P(X) \
72 ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx \
73 || (X) == virtual_stack_vars_rtx \
74 || (X) == virtual_incoming_args_rtx \
75 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
76 && (XEXP (X, 0) == frame_pointer_rtx \
77 || XEXP (X, 0) == hard_frame_pointer_rtx \
78 || (XEXP (X, 0) == arg_pointer_rtx \
79 && fixed_regs[ARG_POINTER_REGNUM]) \
80 || XEXP (X, 0) == virtual_stack_vars_rtx \
81 || XEXP (X, 0) == virtual_incoming_args_rtx)) \
82 || (X) == stack_pointer_rtx \
83 || (X) == virtual_stack_dynamic_rtx \
84 || (X) == virtual_outgoing_args_rtx \
85 || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
86 && (XEXP (X, 0) == stack_pointer_rtx \
87 || XEXP (X, 0) == virtual_stack_dynamic_rtx \
88 || XEXP (X, 0) == virtual_outgoing_args_rtx)) \
89 || GET_CODE (X) == ADDRESSOF)
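/* For example, stack_pointer_rtx and (plus stack_pointer_rtx
   (const_int 4)) satisfy this macro but not FIXED_BASE_PLUS_P; a bare
   arg_pointer_rtx is deliberately absent here because of the i960 case
   described above.  */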
91 /* Much code operates on (low, high) pairs; the low value is an
92 unsigned wide int, the high value a signed wide int. We
93 occasionally need to sign extend from low to high as if low were a
94 signed wide int. */
95 #define HWI_SIGN_EXTEND(low) \
96 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
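/* Example, assuming a 32-bit HOST_WIDE_INT: the pair for the value -2
   is (low 0xfffffffe, high -1).  0xfffffffe is negative when viewed as
   a signed HOST_WIDE_INT, so HWI_SIGN_EXTEND recovers the -1 high
   word; HWI_SIGN_EXTEND (5) is 0.  */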
98 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
99 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
100 const void *));
101 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
102 enum machine_mode, rtx,
103 rtx, int));
105 /* Negate a CONST_INT rtx, truncating (because a conversion from a
106 maximally negative number can overflow). */
107 static rtx
108 neg_const_int (mode, i)
109 enum machine_mode mode;
110 rtx i;
112 return gen_int_mode (- INTVAL (i), mode);
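/* For example, in QImode the negation of (const_int -128) would be +128,
   which QImode cannot represent; gen_int_mode truncates it back to the
   canonical (const_int -128).  */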
116 /* Make a binary operation by properly ordering the operands and
117 seeing if the expression folds. */
119 rtx
120 simplify_gen_binary (code, mode, op0, op1)
121 enum rtx_code code;
122 enum machine_mode mode;
123 rtx op0, op1;
125 rtx tem;
127 /* Put complex operands first and constants second if commutative. */
128 if (GET_RTX_CLASS (code) == 'c'
129 && swap_commutative_operands_p (op0, op1))
130 tem = op0, op0 = op1, op1 = tem;
132 /* If this simplifies, do it. */
133 tem = simplify_binary_operation (code, mode, op0, op1);
134 if (tem)
135 return tem;
137 /* Handle addition and subtraction specially. Otherwise, just form
138 the operation. */
140 if (code == PLUS || code == MINUS)
142 tem = simplify_plus_minus (code, mode, op0, op1, 1);
143 if (tem)
144 return tem;
147 return gen_rtx_fmt_ee (code, mode, op0, op1);
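/* For example, simplify_gen_binary (PLUS, SImode, const1_rtx, x) puts
   the constant second and, with nothing left to fold, returns
   (plus:SI x (const_int 1)).  */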
150 /* If X is a MEM referencing the constant pool, return the real value.
151 Otherwise return X. */
152 rtx
153 avoid_constant_pool_reference (x)
154 rtx x;
156 rtx c, addr;
157 enum machine_mode cmode;
159 if (GET_CODE (x) != MEM)
160 return x;
161 addr = XEXP (x, 0);
163 if (GET_CODE (addr) != SYMBOL_REF
164 || ! CONSTANT_POOL_ADDRESS_P (addr))
165 return x;
167 c = get_pool_constant (addr);
168 cmode = get_pool_mode (addr);
170 /* If we're accessing the constant in a different mode than the one it
171 was originally stored in, attempt to fix that up via subreg simplifications.
172 If that fails we have no choice but to return the original memory. */
173 if (cmode != GET_MODE (x))
175 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
176 return c ? c : x;
179 return c;
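/* Thus a (mem (symbol_ref)) that addresses a constant pool entry
   holding, say, a DFmode 2.0 is seen by the routines below as the
   CONST_DOUBLE itself, so references through the constant pool fold
   like ordinary constants.  */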
182 /* Make a unary operation by first seeing if it folds and otherwise making
183 the specified operation. */
185 rtx
186 simplify_gen_unary (code, mode, op, op_mode)
187 enum rtx_code code;
188 enum machine_mode mode;
189 rtx op;
190 enum machine_mode op_mode;
192 rtx tem;
194 /* If this simplifies, use it. */
195 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
196 return tem;
198 return gen_rtx_fmt_e (code, mode, op);
201 /* Likewise for ternary operations. */
203 rtx
204 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
205 enum rtx_code code;
206 enum machine_mode mode, op0_mode;
207 rtx op0, op1, op2;
209 rtx tem;
211 /* If this simplifies, use it. */
212 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
213 op0, op1, op2)))
214 return tem;
216 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
219 /* Likewise, for relational operations.
220 CMP_MODE specifies the mode in which the comparison is done.  */
223 rtx
224 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
225 enum rtx_code code;
226 enum machine_mode mode;
227 enum machine_mode cmp_mode;
228 rtx op0, op1;
230 rtx tem;
232 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
233 return tem;
235 /* If op0 is a compare, extract the comparison arguments from it. */
236 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
237 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
239 /* Put complex operands first and constants second. */
240 if (swap_commutative_operands_p (op0, op1))
241 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
243 return gen_rtx_fmt_ee (code, mode, op0, op1);
246 /* Replace all occurrences of OLD in X with NEW and try to simplify the
247 resulting RTX. Return a new RTX which is as simplified as possible. */
249 rtx
250 simplify_replace_rtx (x, old, new)
251 rtx x;
252 rtx old;
253 rtx new;
255 enum rtx_code code = GET_CODE (x);
256 enum machine_mode mode = GET_MODE (x);
258 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
259 to build a new expression substituting recursively. If we can't do
260 anything, return our input. */
262 if (x == old)
263 return new;
265 switch (GET_RTX_CLASS (code))
267 case '1':
269 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
270 rtx op = (XEXP (x, 0) == old
271 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
273 return simplify_gen_unary (code, mode, op, op_mode);
276 case '2':
277 case 'c':
278 return
279 simplify_gen_binary (code, mode,
280 simplify_replace_rtx (XEXP (x, 0), old, new),
281 simplify_replace_rtx (XEXP (x, 1), old, new));
282 case '<':
284 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
285 ? GET_MODE (XEXP (x, 0))
286 : GET_MODE (XEXP (x, 1)));
287 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
288 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
290 return
291 simplify_gen_relational (code, mode,
292 (op_mode != VOIDmode
293 ? op_mode
294 : GET_MODE (op0) != VOIDmode
295 ? GET_MODE (op0)
296 : GET_MODE (op1)),
297 op0, op1);
300 case '3':
301 case 'b':
303 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
304 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
306 return
307 simplify_gen_ternary (code, mode,
308 (op_mode != VOIDmode
309 ? op_mode
310 : GET_MODE (op0)),
311 op0,
312 simplify_replace_rtx (XEXP (x, 1), old, new),
313 simplify_replace_rtx (XEXP (x, 2), old, new));
316 case 'x':
317 /* The only case we try to handle is a SUBREG. */
318 if (code == SUBREG)
320 rtx exp;
321 exp = simplify_gen_subreg (GET_MODE (x),
322 simplify_replace_rtx (SUBREG_REG (x),
323 old, new),
324 GET_MODE (SUBREG_REG (x)),
325 SUBREG_BYTE (x));
326 if (exp)
327 x = exp;
329 return x;
331 default:
332 if (GET_CODE (x) == MEM)
333 return
334 replace_equiv_address_nv (x,
335 simplify_replace_rtx (XEXP (x, 0),
336 old, new));
338 return x;
340 return x;
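/* For example, replacing (reg X) by const0_rtx in
   (plus (reg X) (const_int 3)) rebuilds the PLUS, which then folds to
   (const_int 3).  */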
343 /* Try to simplify a unary operation CODE whose output mode is to be
344 MODE with input operand OP whose mode was originally OP_MODE.
345 Return zero if no simplification can be made. */
346 rtx
347 simplify_unary_operation (code, mode, op, op_mode)
348 enum rtx_code code;
349 enum machine_mode mode;
350 rtx op;
351 enum machine_mode op_mode;
353 unsigned int width = GET_MODE_BITSIZE (mode);
354 rtx trueop = avoid_constant_pool_reference (op);
356 /* The order of these tests is critical so that, for example, we don't
357 check the wrong mode (input vs. output) for a conversion operation,
358 such as FIX. At some point, this should be simplified. */
360 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
361 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
363 HOST_WIDE_INT hv, lv;
364 REAL_VALUE_TYPE d;
366 if (GET_CODE (trueop) == CONST_INT)
367 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
368 else
369 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
371 REAL_VALUE_FROM_INT (d, lv, hv, mode);
372 d = real_value_truncate (mode, d);
373 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
375 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
376 && (GET_CODE (trueop) == CONST_DOUBLE
377 || GET_CODE (trueop) == CONST_INT))
379 HOST_WIDE_INT hv, lv;
380 REAL_VALUE_TYPE d;
382 if (GET_CODE (trueop) == CONST_INT)
383 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
384 else
385 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
387 if (op_mode == VOIDmode)
389 /* We don't know how to interpret negative-looking numbers in
390 this case, so don't try to fold those. */
391 if (hv < 0)
392 return 0;
394 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
395 ;
396 else
397 hv = 0, lv &= GET_MODE_MASK (op_mode);
399 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
400 d = real_value_truncate (mode, d);
401 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
404 if (GET_CODE (trueop) == CONST_INT
405 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
407 HOST_WIDE_INT arg0 = INTVAL (trueop);
408 HOST_WIDE_INT val;
410 switch (code)
412 case NOT:
413 val = ~ arg0;
414 break;
416 case NEG:
417 val = - arg0;
418 break;
420 case ABS:
421 val = (arg0 >= 0 ? arg0 : - arg0);
422 break;
424 case FFS:
425 /* Don't use ffs here. Instead, get low order bit and then its
426 number. If arg0 is zero, this will return 0, as desired. */
427 arg0 &= GET_MODE_MASK (mode);
428 val = exact_log2 (arg0 & (- arg0)) + 1;
429 break;
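/* Example: for arg0 == 12 (binary 1100), arg0 & -arg0 isolates the
   lowest set bit (value 4), exact_log2 gives 2, and FFS yields 3;
   bits are numbered from 1.  */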
431 case TRUNCATE:
432 val = arg0;
433 break;
435 case ZERO_EXTEND:
436 /* When zero-extending a CONST_INT, we need to know its
437 original mode. */
438 if (op_mode == VOIDmode)
439 abort ();
440 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
442 /* If we were really extending the mode,
443 we would have to distinguish between zero-extension
444 and sign-extension. */
445 if (width != GET_MODE_BITSIZE (op_mode))
446 abort ();
447 val = arg0;
449 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
450 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
451 else
452 return 0;
453 break;
455 case SIGN_EXTEND:
456 if (op_mode == VOIDmode)
457 op_mode = mode;
458 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
460 /* If we were really extending the mode,
461 we would have to distinguish between zero-extension
462 and sign-extension. */
463 if (width != GET_MODE_BITSIZE (op_mode))
464 abort ();
465 val = arg0;
467 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
470 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
471 if (val
472 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
473 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
475 else
476 return 0;
477 break;
479 case SQRT:
480 case FLOAT_EXTEND:
481 case FLOAT_TRUNCATE:
482 case SS_TRUNCATE:
483 case US_TRUNCATE:
484 return 0;
486 default:
487 abort ();
490 val = trunc_int_for_mode (val, mode);
492 return GEN_INT (val);
495 /* We can do some operations on integer CONST_DOUBLEs. Also allow
496 for a DImode operation on a CONST_INT. */
497 else if (GET_MODE (trueop) == VOIDmode
498 && width <= HOST_BITS_PER_WIDE_INT * 2
499 && (GET_CODE (trueop) == CONST_DOUBLE
500 || GET_CODE (trueop) == CONST_INT))
502 unsigned HOST_WIDE_INT l1, lv;
503 HOST_WIDE_INT h1, hv;
505 if (GET_CODE (trueop) == CONST_DOUBLE)
506 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
507 else
508 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
510 switch (code)
512 case NOT:
513 lv = ~ l1;
514 hv = ~ h1;
515 break;
517 case NEG:
518 neg_double (l1, h1, &lv, &hv);
519 break;
521 case ABS:
522 if (h1 < 0)
523 neg_double (l1, h1, &lv, &hv);
524 else
525 lv = l1, hv = h1;
526 break;
528 case FFS:
529 hv = 0;
530 if (l1 == 0)
531 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
532 else
533 lv = exact_log2 (l1 & (-l1)) + 1;
534 break;
536 case TRUNCATE:
537 /* This is just a change-of-mode, so do nothing. */
538 lv = l1, hv = h1;
539 break;
541 case ZERO_EXTEND:
542 if (op_mode == VOIDmode)
543 abort ();
545 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
546 return 0;
548 hv = 0;
549 lv = l1 & GET_MODE_MASK (op_mode);
550 break;
552 case SIGN_EXTEND:
553 if (op_mode == VOIDmode
554 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
555 return 0;
556 else
558 lv = l1 & GET_MODE_MASK (op_mode);
559 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
560 && (lv & ((HOST_WIDE_INT) 1
561 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
562 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
564 hv = HWI_SIGN_EXTEND (lv);
566 break;
568 case SQRT:
569 return 0;
571 default:
572 return 0;
575 return immed_double_const (lv, hv, mode);
578 else if (GET_CODE (trueop) == CONST_DOUBLE
579 && GET_MODE_CLASS (mode) == MODE_FLOAT)
581 REAL_VALUE_TYPE d;
582 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
584 switch (code)
586 case SQRT:
587 /* We don't attempt to optimize this. */
588 return 0;
590 case ABS: d = REAL_VALUE_ABS (d); break;
591 case NEG: d = REAL_VALUE_NEGATE (d); break;
592 case FLOAT_TRUNCATE: d = real_value_truncate (mode, d); break;
593 case FLOAT_EXTEND: /* All this does is change the mode. */ break;
594 case FIX: d = REAL_VALUE_RNDZINT (d); break;
595 case UNSIGNED_FIX: d = REAL_VALUE_UNSIGNED_RNDZINT (d); break;
596 default:
597 abort ();
599 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
602 else if (GET_CODE (trueop) == CONST_DOUBLE
603 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
604 && GET_MODE_CLASS (mode) == MODE_INT
605 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
607 HOST_WIDE_INT i;
608 REAL_VALUE_TYPE d;
609 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
610 switch (code)
612 case FIX: i = REAL_VALUE_FIX (d); break;
613 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
614 default:
615 abort ();
617 return gen_int_mode (i, mode);
620 /* This was formerly used only for non-IEEE float.
621 eggert@twinsun.com says it is safe for IEEE also. */
622 else
624 enum rtx_code reversed;
625 /* There are some simplifications we can do even if the operands
626 aren't constant. */
627 switch (code)
629 case NOT:
630 /* (not (not X)) == X. */
631 if (GET_CODE (op) == NOT)
632 return XEXP (op, 0);
634 /* (not (eq X Y)) == (ne X Y), etc. */
635 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
636 && ((reversed = reversed_comparison_code (op, NULL_RTX))
637 != UNKNOWN))
638 return gen_rtx_fmt_ee (reversed,
639 op_mode, XEXP (op, 0), XEXP (op, 1));
640 break;
642 case NEG:
643 /* (neg (neg X)) == X. */
644 if (GET_CODE (op) == NEG)
645 return XEXP (op, 0);
646 break;
648 case SIGN_EXTEND:
649 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
650 becomes just the MINUS if its mode is MODE. This allows
651 folding switch statements on machines using casesi (such as
652 the VAX). */
653 if (GET_CODE (op) == TRUNCATE
654 && GET_MODE (XEXP (op, 0)) == mode
655 && GET_CODE (XEXP (op, 0)) == MINUS
656 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
657 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
658 return XEXP (op, 0);
660 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
661 if (! POINTERS_EXTEND_UNSIGNED
662 && mode == Pmode && GET_MODE (op) == ptr_mode
663 && (CONSTANT_P (op)
664 || (GET_CODE (op) == SUBREG
665 && GET_CODE (SUBREG_REG (op)) == REG
666 && REG_POINTER (SUBREG_REG (op))
667 && GET_MODE (SUBREG_REG (op)) == Pmode)))
668 return convert_memory_address (Pmode, op);
669 #endif
670 break;
672 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
673 case ZERO_EXTEND:
674 if (POINTERS_EXTEND_UNSIGNED > 0
675 && mode == Pmode && GET_MODE (op) == ptr_mode
676 && (CONSTANT_P (op)
677 || (GET_CODE (op) == SUBREG
678 && GET_CODE (SUBREG_REG (op)) == REG
679 && REG_POINTER (SUBREG_REG (op))
680 && GET_MODE (SUBREG_REG (op)) == Pmode)))
681 return convert_memory_address (Pmode, op);
682 break;
683 #endif
685 default:
686 break;
689 return 0;
693 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
694 and OP1. Return 0 if no simplification is possible.
696 Don't use this for relational operations such as EQ or LT.
697 Use simplify_relational_operation instead. */
698 rtx
699 simplify_binary_operation (code, mode, op0, op1)
700 enum rtx_code code;
701 enum machine_mode mode;
702 rtx op0, op1;
704 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
705 HOST_WIDE_INT val;
706 unsigned int width = GET_MODE_BITSIZE (mode);
707 rtx tem;
708 rtx trueop0 = avoid_constant_pool_reference (op0);
709 rtx trueop1 = avoid_constant_pool_reference (op1);
711 /* Relational operations don't work here. We must know the mode
712 of the operands in order to do the comparison correctly.
713 Assuming a full word can give incorrect results.
714 Consider comparing 128 with -128 in QImode. */
716 if (GET_RTX_CLASS (code) == '<')
717 abort ();
719 /* Make sure the constant is second. */
720 if (GET_RTX_CLASS (code) == 'c'
721 && swap_commutative_operands_p (trueop0, trueop1))
723 tem = op0, op0 = op1, op1 = tem;
724 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
727 if (GET_MODE_CLASS (mode) == MODE_FLOAT
728 && GET_CODE (trueop0) == CONST_DOUBLE
729 && GET_CODE (trueop1) == CONST_DOUBLE
730 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
732 REAL_VALUE_TYPE f0, f1, value;
734 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
735 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
736 f0 = real_value_truncate (mode, f0);
737 f1 = real_value_truncate (mode, f1);
739 if (code == DIV
740 && !MODE_HAS_INFINITIES (mode)
741 && REAL_VALUES_EQUAL (f1, dconst0))
742 return 0;
744 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
746 value = real_value_truncate (mode, value);
747 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
750 /* We can fold some multi-word operations. */
751 if (GET_MODE_CLASS (mode) == MODE_INT
752 && width == HOST_BITS_PER_WIDE_INT * 2
753 && (GET_CODE (trueop0) == CONST_DOUBLE
754 || GET_CODE (trueop0) == CONST_INT)
755 && (GET_CODE (trueop1) == CONST_DOUBLE
756 || GET_CODE (trueop1) == CONST_INT))
758 unsigned HOST_WIDE_INT l1, l2, lv;
759 HOST_WIDE_INT h1, h2, hv;
761 if (GET_CODE (trueop0) == CONST_DOUBLE)
762 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
763 else
764 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
766 if (GET_CODE (trueop1) == CONST_DOUBLE)
767 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
768 else
769 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
771 switch (code)
773 case MINUS:
774 /* A - B == A + (-B). */
775 neg_double (l2, h2, &lv, &hv);
776 l2 = lv, h2 = hv;
778 /* .. fall through ... */
780 case PLUS:
781 add_double (l1, h1, l2, h2, &lv, &hv);
782 break;
784 case MULT:
785 mul_double (l1, h1, l2, h2, &lv, &hv);
786 break;
788 case DIV: case MOD: case UDIV: case UMOD:
789 /* We'd need to include tree.h to do this and it doesn't seem worth
790 it. */
791 return 0;
793 case AND:
794 lv = l1 & l2, hv = h1 & h2;
795 break;
797 case IOR:
798 lv = l1 | l2, hv = h1 | h2;
799 break;
801 case XOR:
802 lv = l1 ^ l2, hv = h1 ^ h2;
803 break;
805 case SMIN:
806 if (h1 < h2
807 || (h1 == h2
808 && ((unsigned HOST_WIDE_INT) l1
809 < (unsigned HOST_WIDE_INT) l2)))
810 lv = l1, hv = h1;
811 else
812 lv = l2, hv = h2;
813 break;
815 case SMAX:
816 if (h1 > h2
817 || (h1 == h2
818 && ((unsigned HOST_WIDE_INT) l1
819 > (unsigned HOST_WIDE_INT) l2)))
820 lv = l1, hv = h1;
821 else
822 lv = l2, hv = h2;
823 break;
825 case UMIN:
826 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
827 || (h1 == h2
828 && ((unsigned HOST_WIDE_INT) l1
829 < (unsigned HOST_WIDE_INT) l2)))
830 lv = l1, hv = h1;
831 else
832 lv = l2, hv = h2;
833 break;
835 case UMAX:
836 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
837 || (h1 == h2
838 && ((unsigned HOST_WIDE_INT) l1
839 > (unsigned HOST_WIDE_INT) l2)))
840 lv = l1, hv = h1;
841 else
842 lv = l2, hv = h2;
843 break;
845 case LSHIFTRT: case ASHIFTRT:
846 case ASHIFT:
847 case ROTATE: case ROTATERT:
848 #ifdef SHIFT_COUNT_TRUNCATED
849 if (SHIFT_COUNT_TRUNCATED)
850 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
851 #endif
853 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
854 return 0;
856 if (code == LSHIFTRT || code == ASHIFTRT)
857 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
858 code == ASHIFTRT);
859 else if (code == ASHIFT)
860 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
861 else if (code == ROTATE)
862 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
863 else /* code == ROTATERT */
864 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
865 break;
867 default:
868 return 0;
871 return immed_double_const (lv, hv, mode);
874 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
875 || width > HOST_BITS_PER_WIDE_INT || width == 0)
877 /* Even if we can't compute a constant result,
878 there are some cases worth simplifying. */
880 switch (code)
882 case PLUS:
883 /* Maybe simplify x + 0 to x. The two expressions are equivalent
884 when x is NaN, infinite, or finite and non-zero. They aren't
885 when x is -0 and the rounding mode is not towards -infinity,
886 since (-0) + 0 is then 0. */
887 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
888 return op0;
890 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
891 transformations are safe even for IEEE. */
892 if (GET_CODE (op0) == NEG)
893 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
894 else if (GET_CODE (op1) == NEG)
895 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
897 /* (~a) + 1 -> -a */
898 if (INTEGRAL_MODE_P (mode)
899 && GET_CODE (op0) == NOT
900 && trueop1 == const1_rtx)
901 return gen_rtx_NEG (mode, XEXP (op0, 0));
903 /* Handle both-operands-constant cases. We can only add
904 CONST_INTs to constants since the sum of relocatable symbols
905 can't be handled by most assemblers. Don't add CONST_INT
906 to CONST_INT since overflow won't be computed properly if wider
907 than HOST_BITS_PER_WIDE_INT. */
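/* For example, (plus (symbol_ref "s") (const_int 4)) becomes
   (const (plus (symbol_ref "s") (const_int 4))) via plus_constant,
   while two CONST_INTs fall through to the arithmetic at the end of
   this function.  */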
909 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
910 && GET_CODE (op1) == CONST_INT)
911 return plus_constant (op0, INTVAL (op1));
912 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
913 && GET_CODE (op0) == CONST_INT)
914 return plus_constant (op1, INTVAL (op0));
916 /* See if this is something like X * C - X or vice versa or
917 if the multiplication is written as a shift. If so, we can
918 distribute and make a new multiply, shift, or maybe just
919 have X (if C is 2 in the example above). But don't make a
920 real multiply if we didn't have one before. */
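/* For instance, (plus (mult x 4) x) combines to (mult x 5), whereas
   (plus (ashift x 2) x) would give the same product but is rejected
   below because the input contained no real multiply (had_mult == 0).  */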
922 if (! FLOAT_MODE_P (mode))
924 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
925 rtx lhs = op0, rhs = op1;
926 int had_mult = 0;
928 if (GET_CODE (lhs) == NEG)
929 coeff0 = -1, lhs = XEXP (lhs, 0);
930 else if (GET_CODE (lhs) == MULT
931 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
933 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
934 had_mult = 1;
936 else if (GET_CODE (lhs) == ASHIFT
937 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
938 && INTVAL (XEXP (lhs, 1)) >= 0
939 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
941 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
942 lhs = XEXP (lhs, 0);
945 if (GET_CODE (rhs) == NEG)
946 coeff1 = -1, rhs = XEXP (rhs, 0);
947 else if (GET_CODE (rhs) == MULT
948 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
950 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
951 had_mult = 1;
953 else if (GET_CODE (rhs) == ASHIFT
954 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
955 && INTVAL (XEXP (rhs, 1)) >= 0
956 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
958 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
959 rhs = XEXP (rhs, 0);
962 if (rtx_equal_p (lhs, rhs))
964 tem = simplify_gen_binary (MULT, mode, lhs,
965 GEN_INT (coeff0 + coeff1));
966 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
970 /* If one of the operands is a PLUS or a MINUS, see if we can
971 simplify this by the associative law.
972 Don't use the associative law for floating point.
973 The inaccuracy makes it nonassociative,
974 and subtle programs can break if operations are associated. */
976 if (INTEGRAL_MODE_P (mode)
977 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
978 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
979 || (GET_CODE (op0) == CONST
980 && GET_CODE (XEXP (op0, 0)) == PLUS)
981 || (GET_CODE (op1) == CONST
982 && GET_CODE (XEXP (op1, 0)) == PLUS))
983 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
984 return tem;
985 break;
987 case COMPARE:
988 #ifdef HAVE_cc0
989 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
990 using cc0, in which case we want to leave it as a COMPARE
991 so we can distinguish it from a register-register-copy.
993 In IEEE floating point, x-0 is not the same as x. */
995 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
996 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
997 && trueop1 == CONST0_RTX (mode))
998 return op0;
999 #endif
1001 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1002 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1003 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1004 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1006 rtx xop00 = XEXP (op0, 0);
1007 rtx xop10 = XEXP (op1, 0);
1009 #ifdef HAVE_cc0
1010 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1011 #else
1012 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1013 && GET_MODE (xop00) == GET_MODE (xop10)
1014 && REGNO (xop00) == REGNO (xop10)
1015 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1016 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1017 #endif
1018 return xop00;
1020 break;
1022 case MINUS:
1023 /* We can't assume x-x is 0 even with non-IEEE floating point,
1024 but since it is zero except in very strange circumstances, we
1025 will treat it as zero with -funsafe-math-optimizations. */
1026 if (rtx_equal_p (trueop0, trueop1)
1027 && ! side_effects_p (op0)
1028 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1029 return CONST0_RTX (mode);
1031 /* Change subtraction from zero into negation. (0 - x) is the
1032 same as -x when x is NaN, infinite, or finite and non-zero.
1033 But if the mode has signed zeros, and does not round towards
1034 -infinity, then 0 - 0 is 0, not -0. */
1035 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1036 return gen_rtx_NEG (mode, op1);
1038 /* (-1 - a) is ~a. */
1039 if (trueop0 == constm1_rtx)
1040 return gen_rtx_NOT (mode, op1);
1042 /* Subtracting 0 has no effect unless the mode has signed zeros
1043 and supports rounding towards -infinity. In such a case,
1044 0 - 0 is -0. */
1045 if (!(HONOR_SIGNED_ZEROS (mode)
1046 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1047 && trueop1 == CONST0_RTX (mode))
1048 return op0;
1050 /* See if this is something like X * C - X or vice versa or
1051 if the multiplication is written as a shift. If so, we can
1052 distribute and make a new multiply, shift, or maybe just
1053 have X (if C is 2 in the example above). But don't make a
1054 real multiply if we didn't have one before. */
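/* Here, for example, (minus (mult x 3) x) combines to (mult x 2).  */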
1056 if (! FLOAT_MODE_P (mode))
1058 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1059 rtx lhs = op0, rhs = op1;
1060 int had_mult = 0;
1062 if (GET_CODE (lhs) == NEG)
1063 coeff0 = -1, lhs = XEXP (lhs, 0);
1064 else if (GET_CODE (lhs) == MULT
1065 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1067 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1068 had_mult = 1;
1070 else if (GET_CODE (lhs) == ASHIFT
1071 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1072 && INTVAL (XEXP (lhs, 1)) >= 0
1073 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1075 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1076 lhs = XEXP (lhs, 0);
1079 if (GET_CODE (rhs) == NEG)
1080 coeff1 = - 1, rhs = XEXP (rhs, 0);
1081 else if (GET_CODE (rhs) == MULT
1082 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1084 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1085 had_mult = 1;
1087 else if (GET_CODE (rhs) == ASHIFT
1088 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1089 && INTVAL (XEXP (rhs, 1)) >= 0
1090 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1092 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1093 rhs = XEXP (rhs, 0);
1096 if (rtx_equal_p (lhs, rhs))
1098 tem = simplify_gen_binary (MULT, mode, lhs,
1099 GEN_INT (coeff0 - coeff1));
1100 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1104 /* (a - (-b)) -> (a + b). True even for IEEE. */
1105 if (GET_CODE (op1) == NEG)
1106 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1108 /* If one of the operands is a PLUS or a MINUS, see if we can
1109 simplify this by the associative law.
1110 Don't use the associative law for floating point.
1111 The inaccuracy makes it nonassociative,
1112 and subtle programs can break if operations are associated. */
1114 if (INTEGRAL_MODE_P (mode)
1115 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1116 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1117 || (GET_CODE (op0) == CONST
1118 && GET_CODE (XEXP (op0, 0)) == PLUS)
1119 || (GET_CODE (op1) == CONST
1120 && GET_CODE (XEXP (op1, 0)) == PLUS))
1121 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1122 return tem;
1124 /* Don't let a relocatable value get a negative coeff. */
1125 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1126 return simplify_gen_binary (PLUS, mode,
1127 op0,
1128 neg_const_int (mode, op1));
1130 /* (x - (x & y)) -> (x & ~y) */
1131 if (GET_CODE (op1) == AND)
1133 if (rtx_equal_p (op0, XEXP (op1, 0)))
1134 return simplify_gen_binary (AND, mode, op0,
1135 gen_rtx_NOT (mode, XEXP (op1, 1)));
1136 if (rtx_equal_p (op0, XEXP (op1, 1)))
1137 return simplify_gen_binary (AND, mode, op0,
1138 gen_rtx_NOT (mode, XEXP (op1, 0)));
1140 break;
1142 case MULT:
1143 if (trueop1 == constm1_rtx)
1145 tem = simplify_unary_operation (NEG, mode, op0, mode);
1147 return tem ? tem : gen_rtx_NEG (mode, op0);
1150 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1151 x is NaN, since x * 0 is then also NaN. Nor is it valid
1152 when the mode has signed zeros, since multiplying a negative
1153 number by 0 will give -0, not 0. */
1154 if (!HONOR_NANS (mode)
1155 && !HONOR_SIGNED_ZEROS (mode)
1156 && trueop1 == CONST0_RTX (mode)
1157 && ! side_effects_p (op0))
1158 return op1;
1160 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1161 However, ANSI says we can drop signals,
1162 so we can do this anyway. */
1163 if (trueop1 == CONST1_RTX (mode))
1164 return op0;
1166 /* Convert multiply by constant power of two into shift unless
1167 we are still generating RTL. This test is a kludge. */
1168 if (GET_CODE (trueop1) == CONST_INT
1169 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1170 /* If the mode is larger than the host word size, and the
1171 uppermost bit is set, then this isn't a power of two due
1172 to implicit sign extension. */
1173 && (width <= HOST_BITS_PER_WIDE_INT
1174 || val != HOST_BITS_PER_WIDE_INT - 1)
1175 && ! rtx_equal_function_value_matters)
1176 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1178 /* x*2 is x+x and x*(-1) is -x */
1179 if (GET_CODE (trueop1) == CONST_DOUBLE
1180 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1181 && GET_MODE (op0) == mode)
1183 REAL_VALUE_TYPE d;
1184 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1186 if (REAL_VALUES_EQUAL (d, dconst2))
1187 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1189 if (REAL_VALUES_EQUAL (d, dconstm1))
1190 return gen_rtx_NEG (mode, op0);
1192 break;
1194 case IOR:
1195 if (trueop1 == const0_rtx)
1196 return op0;
1197 if (GET_CODE (trueop1) == CONST_INT
1198 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1199 == GET_MODE_MASK (mode)))
1200 return op1;
1201 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1202 return op0;
1203 /* A | (~A) -> -1 */
1204 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1205 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1206 && ! side_effects_p (op0)
1207 && GET_MODE_CLASS (mode) != MODE_CC)
1208 return constm1_rtx;
1209 break;
1211 case XOR:
1212 if (trueop1 == const0_rtx)
1213 return op0;
1214 if (GET_CODE (trueop1) == CONST_INT
1215 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1216 == GET_MODE_MASK (mode)))
1217 return gen_rtx_NOT (mode, op0);
1218 if (trueop0 == trueop1 && ! side_effects_p (op0)
1219 && GET_MODE_CLASS (mode) != MODE_CC)
1220 return const0_rtx;
1221 break;
1223 case AND:
1224 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1225 return const0_rtx;
1226 if (GET_CODE (trueop1) == CONST_INT
1227 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1228 == GET_MODE_MASK (mode)))
1229 return op0;
1230 if (trueop0 == trueop1 && ! side_effects_p (op0)
1231 && GET_MODE_CLASS (mode) != MODE_CC)
1232 return op0;
1233 /* A & (~A) -> 0 */
1234 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1235 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1236 && ! side_effects_p (op0)
1237 && GET_MODE_CLASS (mode) != MODE_CC)
1238 return const0_rtx;
1239 break;
1241 case UDIV:
1242 /* Convert divide by power of two into shift (divide by 1 handled
1243 below). */
1244 if (GET_CODE (trueop1) == CONST_INT
1245 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1246 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1248 /* ... fall through ... */
1250 case DIV:
1251 if (trueop1 == CONST1_RTX (mode))
1253 /* On some platforms DIV uses narrower mode than its
1254 operands. */
1255 rtx x = gen_lowpart_common (mode, op0);
1256 if (x)
1257 return x;
1258 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1259 return gen_lowpart_SUBREG (mode, op0);
1260 else
1261 return op0;
1264 /* Maybe change 0 / x to 0. This transformation isn't safe for
1265 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1266 Nor is it safe for modes with signed zeros, since dividing
1267 0 by a negative number gives -0, not 0. */
1268 if (!HONOR_NANS (mode)
1269 && !HONOR_SIGNED_ZEROS (mode)
1270 && trueop0 == CONST0_RTX (mode)
1271 && ! side_effects_p (op1))
1272 return op0;
1274 /* Change division by a constant into multiplication. Only do
1275 this with -funsafe-math-optimizations. */
1276 else if (GET_CODE (trueop1) == CONST_DOUBLE
1277 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1278 && trueop1 != CONST0_RTX (mode)
1279 && flag_unsafe_math_optimizations)
1281 REAL_VALUE_TYPE d;
1282 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1284 if (! REAL_VALUES_EQUAL (d, dconst0))
1286 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1287 return gen_rtx_MULT (mode, op0,
1288 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1291 break;
1293 case UMOD:
1294 /* Handle modulus by power of two (mod with 1 handled below). */
1295 if (GET_CODE (trueop1) == CONST_INT
1296 && exact_log2 (INTVAL (trueop1)) > 0)
1297 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1299 /* ... fall through ... */
1301 case MOD:
1302 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1303 && ! side_effects_p (op0) && ! side_effects_p (op1))
1304 return const0_rtx;
1305 break;
1307 case ROTATERT:
1308 case ROTATE:
1309 /* Rotating ~0 always results in ~0. */
1310 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1311 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1312 && ! side_effects_p (op1))
1313 return op0;
1315 /* ... fall through ... */
1317 case ASHIFT:
1318 case ASHIFTRT:
1319 case LSHIFTRT:
1320 if (trueop1 == const0_rtx)
1321 return op0;
1322 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1323 return op0;
1324 break;
1326 case SMIN:
1327 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1328 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1329 && ! side_effects_p (op0))
1330 return op1;
1331 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1332 return op0;
1333 break;
1335 case SMAX:
1336 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1337 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1338 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1339 && ! side_effects_p (op0))
1340 return op1;
1341 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1342 return op0;
1343 break;
1345 case UMIN:
1346 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1347 return op1;
1348 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1349 return op0;
1350 break;
1352 case UMAX:
1353 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1354 return op1;
1355 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1356 return op0;
1357 break;
1359 case SS_PLUS:
1360 case US_PLUS:
1361 case SS_MINUS:
1362 case US_MINUS:
1363 /* ??? There are simplifications that can be done. */
1364 return 0;
1366 default:
1367 abort ();
1370 return 0;
1373 /* Get the integer argument values in two forms:
1374 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1376 arg0 = INTVAL (trueop0);
1377 arg1 = INTVAL (trueop1);
1379 if (width < HOST_BITS_PER_WIDE_INT)
1381 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1382 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1384 arg0s = arg0;
1385 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1386 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1388 arg1s = arg1;
1389 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1390 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1392 else
1394 arg0s = arg0;
1395 arg1s = arg1;
1398 /* Compute the value of the arithmetic. */
1400 switch (code)
1402 case PLUS:
1403 val = arg0s + arg1s;
1404 break;
1406 case MINUS:
1407 val = arg0s - arg1s;
1408 break;
1410 case MULT:
1411 val = arg0s * arg1s;
1412 break;
1414 case DIV:
1415 if (arg1s == 0
1416 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1417 && arg1s == -1))
1418 return 0;
1419 val = arg0s / arg1s;
1420 break;
1422 case MOD:
1423 if (arg1s == 0
1424 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1425 && arg1s == -1))
1426 return 0;
1427 val = arg0s % arg1s;
1428 break;
1430 case UDIV:
1431 if (arg1 == 0
1432 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1433 && arg1s == -1))
1434 return 0;
1435 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1436 break;
1438 case UMOD:
1439 if (arg1 == 0
1440 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1441 && arg1s == -1))
1442 return 0;
1443 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1444 break;
1446 case AND:
1447 val = arg0 & arg1;
1448 break;
1450 case IOR:
1451 val = arg0 | arg1;
1452 break;
1454 case XOR:
1455 val = arg0 ^ arg1;
1456 break;
1458 case LSHIFTRT:
1459 /* If shift count is undefined, don't fold it; let the machine do
1460 what it wants. But truncate it if the machine will do that. */
1461 if (arg1 < 0)
1462 return 0;
1464 #ifdef SHIFT_COUNT_TRUNCATED
1465 if (SHIFT_COUNT_TRUNCATED)
1466 arg1 %= width;
1467 #endif
1469 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1470 break;
1472 case ASHIFT:
1473 if (arg1 < 0)
1474 return 0;
1476 #ifdef SHIFT_COUNT_TRUNCATED
1477 if (SHIFT_COUNT_TRUNCATED)
1478 arg1 %= width;
1479 #endif
1481 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1482 break;
1484 case ASHIFTRT:
1485 if (arg1 < 0)
1486 return 0;
1488 #ifdef SHIFT_COUNT_TRUNCATED
1489 if (SHIFT_COUNT_TRUNCATED)
1490 arg1 %= width;
1491 #endif
1493 val = arg0s >> arg1;
1495 /* The bootstrap compiler may not have sign-extended the right shift.
1496 Manually extend the sign to ensure bootstrap cc matches gcc. */
1497 if (arg0s < 0 && arg1 > 0)
1498 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1500 break;
1502 case ROTATERT:
1503 if (arg1 < 0)
1504 return 0;
1506 arg1 %= width;
1507 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1508 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1509 break;
1511 case ROTATE:
1512 if (arg1 < 0)
1513 return 0;
1515 arg1 %= width;
1516 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1517 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1518 break;
1520 case COMPARE:
1521 /* Do nothing here. */
1522 return 0;
1524 case SMIN:
1525 val = arg0s <= arg1s ? arg0s : arg1s;
1526 break;
1528 case UMIN:
1529 val = ((unsigned HOST_WIDE_INT) arg0
1530 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1531 break;
1533 case SMAX:
1534 val = arg0s > arg1s ? arg0s : arg1s;
1535 break;
1537 case UMAX:
1538 val = ((unsigned HOST_WIDE_INT) arg0
1539 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1540 break;
1542 default:
1543 abort ();
1546 val = trunc_int_for_mode (val, mode);
1548 return GEN_INT (val);
1551 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1552 PLUS or MINUS.
1554 Rather than test for specific cases, we do this by a brute-force method
1555 and do all possible simplifications until no more changes occur. Then
1556 we rebuild the operation.
1558 If FORCE is true, then always generate the rtx. This is used to
1559 canonicalize stuff emitted from simplify_gen_binary. Note that this
1560 can still fail if the rtx is too complex. It won't fail just because
1561 the result is not 'simpler' than the input, however. */
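/* For example, (a - (b - c)) + (-a) expands to the operand list
   {+a, -b, +c, -a}; the +a and -a cancel, and the remainder is rebuilt
   as (minus c b).  */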
1563 struct simplify_plus_minus_op_data
1565 rtx op;
1566 int neg;
1569 static int
1570 simplify_plus_minus_op_data_cmp (p1, p2)
1571 const void *p1;
1572 const void *p2;
1574 const struct simplify_plus_minus_op_data *d1 = p1;
1575 const struct simplify_plus_minus_op_data *d2 = p2;
1577 return (commutative_operand_precedence (d2->op)
1578 - commutative_operand_precedence (d1->op));
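/* Sorting with this comparison puts complex operands first and
   constants last; the CONST-rebuilding code below relies on any
   CONST_INT ending up in the final array slot.  */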
1581 static rtx
1582 simplify_plus_minus (code, mode, op0, op1, force)
1583 enum rtx_code code;
1584 enum machine_mode mode;
1585 rtx op0, op1;
1586 int force;
1588 struct simplify_plus_minus_op_data ops[8];
1589 rtx result, tem;
1590 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1591 int first, negate, changed;
1592 int i, j;
1594 memset ((char *) ops, 0, sizeof ops);
1596 /* Set up the two operands and then expand them until nothing has been
1597 changed. If we run out of room in our array, give up; this should
1598 almost never happen. */
1600 ops[0].op = op0;
1601 ops[0].neg = 0;
1602 ops[1].op = op1;
1603 ops[1].neg = (code == MINUS);
1605 do
1607 changed = 0;
1609 for (i = 0; i < n_ops; i++)
1611 rtx this_op = ops[i].op;
1612 int this_neg = ops[i].neg;
1613 enum rtx_code this_code = GET_CODE (this_op);
1615 switch (this_code)
1617 case PLUS:
1618 case MINUS:
1619 if (n_ops == 7)
1620 return NULL_RTX;
1622 ops[n_ops].op = XEXP (this_op, 1);
1623 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1624 n_ops++;
1626 ops[i].op = XEXP (this_op, 0);
1627 input_ops++;
1628 changed = 1;
1629 break;
1631 case NEG:
1632 ops[i].op = XEXP (this_op, 0);
1633 ops[i].neg = ! this_neg;
1634 changed = 1;
1635 break;
1637 case CONST:
1638 if (n_ops < 7
1639 && GET_CODE (XEXP (this_op, 0)) == PLUS
1640 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1641 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1643 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1644 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1645 ops[n_ops].neg = this_neg;
1646 n_ops++;
1647 input_consts++;
1648 changed = 1;
1650 break;
1652 case NOT:
1653 /* ~a -> (-a - 1) */
1654 if (n_ops != 7)
1656 ops[n_ops].op = constm1_rtx;
1657 ops[n_ops++].neg = this_neg;
1658 ops[i].op = XEXP (this_op, 0);
1659 ops[i].neg = !this_neg;
1660 changed = 1;
1662 break;
1664 case CONST_INT:
1665 if (this_neg)
1667 ops[i].op = neg_const_int (mode, this_op);
1668 ops[i].neg = 0;
1669 changed = 1;
1671 break;
1673 default:
1674 break;
1678 while (changed);
1680 /* If we only have two operands, we can't do anything. */
1681 if (n_ops <= 2 && !force)
1682 return NULL_RTX;
1684 /* Count the number of CONSTs we didn't split above. */
1685 for (i = 0; i < n_ops; i++)
1686 if (GET_CODE (ops[i].op) == CONST)
1687 input_consts++;
1689 /* Now simplify each pair of operands until nothing changes. The first
1690 time through just simplify constants against each other. */
1692 first = 1;
1693 do
1695 changed = first;
1697 for (i = 0; i < n_ops - 1; i++)
1698 for (j = i + 1; j < n_ops; j++)
1700 rtx lhs = ops[i].op, rhs = ops[j].op;
1701 int lneg = ops[i].neg, rneg = ops[j].neg;
1703 if (lhs != 0 && rhs != 0
1704 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1706 enum rtx_code ncode = PLUS;
1708 if (lneg != rneg)
1710 ncode = MINUS;
1711 if (lneg)
1712 tem = lhs, lhs = rhs, rhs = tem;
1714 else if (swap_commutative_operands_p (lhs, rhs))
1715 tem = lhs, lhs = rhs, rhs = tem;
1717 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1719 /* Reject "simplifications" that just wrap the two
1720 arguments in a CONST. Failure to do so can result
1721 in infinite recursion with simplify_binary_operation
1722 when it calls us to simplify CONST operations. */
1723 if (tem
1724 && ! (GET_CODE (tem) == CONST
1725 && GET_CODE (XEXP (tem, 0)) == ncode
1726 && XEXP (XEXP (tem, 0), 0) == lhs
1727 && XEXP (XEXP (tem, 0), 1) == rhs)
1728 /* Don't allow -x + -1 -> ~x simplifications in the
1729 first pass. This allows us the chance to combine
1730 the -1 with other constants. */
1731 && ! (first
1732 && GET_CODE (tem) == NOT
1733 && XEXP (tem, 0) == rhs))
1735 lneg &= rneg;
1736 if (GET_CODE (tem) == NEG)
1737 tem = XEXP (tem, 0), lneg = !lneg;
1738 if (GET_CODE (tem) == CONST_INT && lneg)
1739 tem = neg_const_int (mode, tem), lneg = 0;
1741 ops[i].op = tem;
1742 ops[i].neg = lneg;
1743 ops[j].op = NULL_RTX;
1744 changed = 1;
1749 first = 0;
1751 while (changed);
1753 /* Pack all the operands to the lower-numbered entries. */
1754 for (i = 0, j = 0; j < n_ops; j++)
1755 if (ops[j].op)
1756 ops[i++] = ops[j];
1757 n_ops = i;
1759 /* Sort the operations based on swap_commutative_operands_p. */
1760 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1762 /* We suppressed creation of trivial CONST expressions in the
1763 combination loop to avoid recursion. Create one manually now.
1764 The combination loop should have ensured that there is exactly
1765 one CONST_INT, and the sort will have ensured that it is last
1766 in the array and that any other constant will be next-to-last. */
1768 if (n_ops > 1
1769 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1770 && CONSTANT_P (ops[n_ops - 2].op))
1772 rtx value = ops[n_ops - 1].op;
1773 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1774 value = neg_const_int (mode, value);
1775 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1776 n_ops--;
1779 /* Count the number of CONSTs that we generated. */
1780 n_consts = 0;
1781 for (i = 0; i < n_ops; i++)
1782 if (GET_CODE (ops[i].op) == CONST)
1783 n_consts++;
1785 /* Give up if we didn't reduce the number of operands we had. Make
1786 sure we count a CONST as two operands. If we have the same
1787 number of operands, but have made more CONSTs than before, this
1788 is also an improvement, so accept it. */
1789 if (!force
1790 && (n_ops + n_consts > input_ops
1791 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1792 return NULL_RTX;
1794 /* Put a non-negated operand first. If there aren't any, make all
1795 operands positive and negate the whole thing later. */
1797 negate = 0;
1798 for (i = 0; i < n_ops && ops[i].neg; i++)
1799 continue;
1800 if (i == n_ops)
1802 for (i = 0; i < n_ops; i++)
1803 ops[i].neg = 0;
1804 negate = 1;
1806 else if (i != 0)
1808 tem = ops[0].op;
1809 ops[0] = ops[i];
1810 ops[i].op = tem;
1811 ops[i].neg = 1;
1814 /* Now make the result by performing the requested operations. */
1815 result = ops[0].op;
1816 for (i = 1; i < n_ops; i++)
1817 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1818 mode, result, ops[i].op);
1820 return negate ? gen_rtx_NEG (mode, result) : result;
1823 /* Like simplify_binary_operation except used for relational operators.
1824 MODE is the mode of the operands, not that of the result. If MODE
1825 is VOIDmode, both operands must also be VOIDmode and we compare the
1826 operands in "infinite precision".
1828 If no simplification is possible, this function returns zero. Otherwise,
1829 it returns either const_true_rtx or const0_rtx. */
1831 rtx
1832 simplify_relational_operation (code, mode, op0, op1)
1833 enum rtx_code code;
1834 enum machine_mode mode;
1835 rtx op0, op1;
1837 int equal, op0lt, op0ltu, op1lt, op1ltu;
1838 rtx tem;
1839 rtx trueop0;
1840 rtx trueop1;
1842 if (mode == VOIDmode
1843 && (GET_MODE (op0) != VOIDmode
1844 || GET_MODE (op1) != VOIDmode))
1845 abort ();
1847 /* If op0 is a compare, extract the comparison arguments from it. */
1848 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1849 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1851 trueop0 = avoid_constant_pool_reference (op0);
1852 trueop1 = avoid_constant_pool_reference (op1);
1854 /* We can't simplify MODE_CC values since we don't know what the
1855 actual comparison is. */
1856 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1857 #ifdef HAVE_cc0
1858 || op0 == cc0_rtx
1859 #endif
1861 return 0;
1863 /* Make sure the constant is second. */
1864 if (swap_commutative_operands_p (trueop0, trueop1))
1866 tem = op0, op0 = op1, op1 = tem;
1867 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1868 code = swap_condition (code);
1871 /* For integer comparisons of A and B maybe we can simplify A - B and can
1872 then simplify a comparison of that with zero. If A and B are both either
1873 a register or a CONST_INT, this can't help; testing for these cases will
1874 prevent infinite recursion here and speed things up.
1876 If CODE is an unsigned comparison, then we can never do this optimization,
1877 because it gives an incorrect result if the subtraction wraps around zero.
1878 ANSI C defines unsigned operations such that they never overflow, and
1879 thus such cases cannot be ignored. */
1881 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1882 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1883 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1884 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1885 && code != GTU && code != GEU && code != LTU && code != LEU)
1886 return simplify_relational_operation (signed_condition (code),
1887 mode, tem, const0_rtx);
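/* For example, (gt x (plus x (const_int -1))) is not caught above, but
   x - (x - 1) folds to (const_int 1) and (gt 1 0) is known to be true.
   This is sound only because signed overflow may be ignored, which is
   why the unsigned codes are excluded.  */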
1889 if (flag_unsafe_math_optimizations && code == ORDERED)
1890 return const_true_rtx;
1892 if (flag_unsafe_math_optimizations && code == UNORDERED)
1893 return const0_rtx;
1895 /* For modes without NaNs, if the two operands are equal, we know the
1896 result. */
1897 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
1898 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1900 /* If the operands are floating-point constants, see if we can fold
1901 the result. */
1902 else if (GET_CODE (trueop0) == CONST_DOUBLE
1903 && GET_CODE (trueop1) == CONST_DOUBLE
1904 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1906 REAL_VALUE_TYPE d0, d1;
1908 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
1909 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
1911 /* Comparisons are unordered iff at least one of the values is NaN. */
1912 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
1913 switch (code)
1915 case UNEQ:
1916 case UNLT:
1917 case UNGT:
1918 case UNLE:
1919 case UNGE:
1920 case NE:
1921 case UNORDERED:
1922 return const_true_rtx;
1923 case EQ:
1924 case LT:
1925 case GT:
1926 case LE:
1927 case GE:
1928 case LTGT:
1929 case ORDERED:
1930 return const0_rtx;
1931 default:
1932 return 0;
1935 equal = REAL_VALUES_EQUAL (d0, d1);
1936 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
1937 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
1940 /* Otherwise, see if the operands are both integers. */
1941 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
1942 && (GET_CODE (trueop0) == CONST_DOUBLE
1943 || GET_CODE (trueop0) == CONST_INT)
1944 && (GET_CODE (trueop1) == CONST_DOUBLE
1945 || GET_CODE (trueop1) == CONST_INT))
1947 int width = GET_MODE_BITSIZE (mode);
1948 HOST_WIDE_INT l0s, h0s, l1s, h1s;
1949 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
1951 /* Get the two words comprising each integer constant. */
1952 if (GET_CODE (trueop0) == CONST_DOUBLE)
1954 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
1955 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
1957 else
1959 l0u = l0s = INTVAL (trueop0);
1960 h0u = h0s = HWI_SIGN_EXTEND (l0s);
1963 if (GET_CODE (trueop1) == CONST_DOUBLE)
1965 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
1966 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
1968 else
1970 l1u = l1s = INTVAL (trueop1);
1971 h1u = h1s = HWI_SIGN_EXTEND (l1s);
1974 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1975 we have to sign or zero-extend the values. */
1976 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
1978 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
1979 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
1981 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1982 l0s |= ((HOST_WIDE_INT) (-1) << width);
1984 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1985 l1s |= ((HOST_WIDE_INT) (-1) << width);
1987 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
1988 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
1990 equal = (h0u == h1u && l0u == l1u);
1991 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
1992 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
1993 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
1994 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;
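	  /* For instance, GET_MODE_MASK (SImode) is 0xffffffff; no
	     unsigned SImode value can be above it, so LEU against it is
	     always true and GTU against it is always false.  */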
	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
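      /* Worked example: (zero_extract (const_int 0xab) (const_int 4)
	 (const_int 0)) with BITS_BIG_ENDIAN clear shifts right by 0 and
	 masks with 0xf, giving (const_int 11); a sign_extract of the
	 same field propagates the set top bit and gives (const_int -5).  */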
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
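      /* E.g. (if_then_else (ne a b) a b) and (if_then_else (eq a b) b a)
	 both reduce to a: whichever arm is selected has a's value.  */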
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;
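      /* E.g. with STORE_FLAG_VALUE == 1,
	 (if_then_else (lt a b) (const_int 1) (const_int 0)) becomes
	 (lt a b) itself; with the constant arms swapped, the reversed
	 comparison (ge a b) is used instead.  */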
    default:
      abort ();
    }

  return 0;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* A similar comment applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = !part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* FALLTHROUGH */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
	  else
	    {
	      val >>= offset;
	      if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
		val = trunc_int_for_mode (val, outermode);
	      return GEN_INT (val);
	    }
	default:
	  break;
	}
    }
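  /* Worked example: (subreg:QI (const_int 0x1234) 0) on a little-endian
     target reads from bit offset 0 and truncates to QImode, giving
     (const_int 0x34); byte 1 gives (const_int 0x12).  */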
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0; on big endian machines the value
	 should really be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
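      /* For instance, on a word- and byte-big-endian 32-bit target, a
	 paradoxical (subreg:DI (reg:SI) 0) notionally starts four bytes
	 before the SImode value, so the adjustments above bias
	 FINAL_OFFSET by the negative size difference before the checks
	 below.  */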
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
#ifdef CLASS_CANNOT_CHANGE_MODE
      && ! (CLASS_CANNOT_CHANGE_MODE_P (outermode, innermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT
	    && (TEST_HARD_REG_BIT
		(reg_class_contents[(int) CLASS_CANNOT_CHANGE_MODE],
		 REGNO (op))))
#endif
      && REGNO (op) < FIRST_PSEUDO_REGISTER
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit Sparc and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG (outermode, final_regno);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis,
	     which cannot grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
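  /* For example, on a target whose DImode values occupy pairs of
     word-sized hard registers, (subreg:SI (reg:DI 2) 4) can fold
     directly to (reg:SI 3) on a little-endian machine;
     subreg_hard_regno does the endian-aware regno arithmetic.  */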
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
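  /* E.g. a narrowing (subreg:SI (mem:DI addr) 4) becomes
     (mem:SI (plus addr 4)); adjust_address_nv does the offset
     arithmetic without re-validating the address.  */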
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }
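  /* E.g. for (concat:SC re im), a subreg at byte 0 selects RE and a
     subreg at byte GET_MODE_UNIT_SIZE selects IM, each then simplified
     recursively in its own mode.  */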
  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
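/* Callers use simplify_gen_subreg in place of gen_rtx_SUBREG when a
   folded form may exist; e.g. asking for the QImode lowpart of
   (const_int 0x1234) yields (const_int 0x34) on a little-endian
   target rather than a SUBREG wrapping a constant.  */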
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	{
	  rtx tem;

	  tem = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tem;
	  return simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	}
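      /* Canonicalization example: swap_commutative_operands_p holds for
	 (plus:SI (const_int 4) (reg:SI 1)), so the operands are
	 exchanged to put the constant second before folding.  When no
	 swap is needed, fall through to the generic binary case.  */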
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      return NULL;
    default:
      return NULL;
    }
}