/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || ((X) == arg_pointer_rtx				\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || ((X) == arg_pointer_rtx				\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
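
/* A minimal usage sketch: splitting a CONST_INT into the (low, high)
   pair used by the double-word helpers below.

     unsigned HOST_WIDE_INT lv = INTVAL (op);
     HOST_WIDE_INT hv = HWI_SIGN_EXTEND (lv);

   A negative INTVAL yields hv == -1, a non-negative one hv == 0.  */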
static rtx simplify_plus_minus		PARAMS ((enum rtx_code,
						 enum machine_mode, rtx, rtx));
static void check_fold_consts		PARAMS ((PTR));
static int entry_and_rtx_equal_p	PARAMS ((const void *, const void *));
static unsigned int get_value_hash	PARAMS ((const void *));
static struct elt_list *new_elt_list	PARAMS ((struct elt_list *,
						 cselib_val *));
static struct elt_loc_list *new_elt_loc_list PARAMS ((struct elt_loc_list *,
						      rtx));
static void unchain_one_value		PARAMS ((cselib_val *));
static void unchain_one_elt_list	PARAMS ((struct elt_list **));
static void unchain_one_elt_loc_list	PARAMS ((struct elt_loc_list **));
static void clear_table			PARAMS ((int));
static int discard_useless_locs		PARAMS ((void **, void *));
static int discard_useless_values	PARAMS ((void **, void *));
static void remove_useless_values	PARAMS ((void));
static rtx wrap_constant		PARAMS ((enum machine_mode, rtx));
static unsigned int hash_rtx		PARAMS ((rtx, enum machine_mode, int));
static cselib_val *new_cselib_val	PARAMS ((unsigned int,
						 enum machine_mode));
static void add_mem_for_addr		PARAMS ((cselib_val *, cselib_val *,
						 rtx));
static cselib_val *cselib_lookup_mem	PARAMS ((rtx, int));
static rtx cselib_subst_to_values	PARAMS ((rtx));
static void cselib_invalidate_regno	PARAMS ((unsigned int,
						 enum machine_mode));
static int cselib_mem_conflict_p	PARAMS ((rtx, rtx));
static int cselib_invalidate_mem_1	PARAMS ((void **, void *));
static void cselib_invalidate_mem	PARAMS ((rtx));
static void cselib_invalidate_rtx	PARAMS ((rtx, rtx, void *));
static void cselib_record_set		PARAMS ((rtx, cselib_val *,
						 cselib_val *));
static void cselib_record_sets		PARAMS ((rtx));
/* There are three ways in which cselib can look up an rtx:
   - for a REG, the reg_values table (which is indexed by regno) is used
   - for a MEM, we recursively look up its address and then follow the
     addr_list of that value
   - for everything else, we compute a hash value and go through the hash
     table.  Since different rtx's can still have the same hash value,
     this involves walking the table entries for a given value and comparing
     the locations of the entries with the rtx we are looking up.  */
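
/* For example, a pass that wants the equivalence class of a register
   or memory reference, without creating a new entry, can use the
   lookup entry point directly (as rtx_equal_for_cselib_p does below):

     cselib_val *e = cselib_lookup (x, GET_MODE (x), 0);

   A null result means no value is recorded for X.  */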
/* A table that enables us to look up elts by their value.  */
static htab_t hash_table;

/* This is a global so we don't have to pass this through every function.
   It is used in new_elt_loc_list to set SETTING_INSN.  */
static rtx cselib_current_insn;

/* Every new unknown value gets a unique number.  */
static unsigned int next_unknown_value;

/* The number of registers we had when the varrays were last resized.  */
static unsigned int cselib_nregs;

/* Count values without known locations.  Whenever this grows too big, we
   remove these useless values from the table.  */
static int n_useless_values;

/* Number of useless values before we remove them from the hash table.  */
#define MAX_USELESS_VALUES 32

/* This table maps from register number to values.  It does not contain
   pointers to cselib_val structures, but rather elt_lists.  The purpose is
   to be able to refer to the same register in different modes.  */
static varray_type reg_values;
#define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))

/* Here the set of indices I with REG_VALUES(I) != 0 is saved.  This is used
   in clear_table() for fast emptying.  */
static varray_type used_regs;

/* We pass this to cselib_invalidate_mem to invalidate all of
   memory for a non-const call instruction.  */
static rtx callmem;

/* Memory for our structures is allocated from this obstack.  */
static struct obstack cselib_obstack;

/* Used to quickly free all memory.  */
static char *cselib_startobj;

/* Caches for unused structures.  */
static cselib_val *empty_vals;
static struct elt_list *empty_elt_lists;
static struct elt_loc_list *empty_elt_loc_lists;

/* Set by discard_useless_locs if it deleted the last location of any
   value.  */
static int values_became_useless;
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
	  || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
	  || (GET_CODE (op0) == SUBREG
	      && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);

  if (tem)
    return tem;

  /* Handle addition and subtraction of CONST_INT specially.  Otherwise,
     just form the operation.  */

  if (code == PLUS && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, INTVAL (op1));
  else if (code == MINUS && GET_CODE (op1) == CONST_INT
	   && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, - INTVAL (op1));
  else
    return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else
      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (op);
      register HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;
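
	  /* Worked example: in QImode, arg0 = 0x28 (binary 101000).
	     arg0 & -arg0 isolates the lowest set bit, 0x8, exact_log2
	     gives 3, and adding 1 yields 4, the 1-based index of that
	     bit.  */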
	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      rtx x;

      if (setjmp (handler))
	/* There used to be a warning here, but that is inadvisable.
	   People may want to cause traps, and the natural way
	   to do it should not get a warning.  */
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;

	case ABS:
	  if (REAL_VALUE_NEGATIVE (d))
	    d = REAL_VALUE_NEGATE (d);
	  break;

	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;

	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;

	case FIX:
	  d = REAL_VALUE_RNDZINT (d);
	  break;

	case UNSIGNED_FIX:
	  d = REAL_VALUE_UNSIGNED_RNDZINT (d);
	  break;

	default:
	  abort ();
	}

      x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
      set_float_handler (NULL_PTR);
      return x;
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      HOST_WIDE_INT val;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case FIX:
	  val = REAL_VALUE_FIX (d);
	  break;

	case UNSIGNED_FIX:
	  val = REAL_VALUE_UNSIGNED_FIX (d);
	  break;

	default:
	  abort ();
	}

      set_float_handler (NULL_PTR);

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the Vax).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#ifdef POINTERS_EXTEND_UNSIGNED
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && CONSTANT_P (op))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#ifdef POINTERS_EXTEND_UNSIGNED
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && CONSTANT_P (op))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;
      jmp_buf handler;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

#ifdef REAL_ARITHMETIC
#ifndef REAL_INFINITY
      if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
	{
	  set_float_handler (NULL_PTR);
	  return 0;
	}
#endif
      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
#else
      switch (code)
	{
	case PLUS:
	  value = f0 + f1;
	  break;
	case MINUS:
	  value = f0 - f1;
	  break;
	case MULT:
	  value = f0 * f1;
	  break;
	case DIV:
#ifndef REAL_INFINITY
	  if (f1 == 0)
	    return 0;
#endif
	  value = f0 / f1;
	  break;
	case SMIN:
	  value = MIN (f0, f1);
	  break;
	case SMAX:
	  value = MAX (f0, f1);
	  break;
	default:
	  abort ();
	}
#endif

      value = real_value_truncate (mode, value);
      set_float_handler (NULL_PTR);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* In IEEE floating point, x+0 is not the same as x.  Similarly
	     for the other optimizations below.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_fast_math)
	    break;

	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */
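
	  /* For instance, (plus (mult x 4) x) distributes to
	     (mult x 5), and (plus (mult x 2) (neg x)) collapses to
	     just x; but (plus (ashift x 2) x) is left alone, since
	     folding it would create a multiply that wasn't there.  */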
	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_fast_math)
	      && op1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }

	  break;

	case MINUS:
	  /* None of these optimizations can be done for IEEE
	     floating point.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_fast_math)
	    break;

	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -ffast-math.  */
	  if (rtx_equal_p (op0, op1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_fast_math))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  */
	  if (op0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (op0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect.  */
	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */
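
	  /* For instance, (minus (mult x 3) x) distributes to
	     (mult x 2), and (minus (ashift x 2) (mult x 3)) becomes
	     (mult x 1), which simplifies to just x.  */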
	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return plus_constant (op0, - INTVAL (op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;

	case MULT:
	  if (op1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* In IEEE floating point, x*0 is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_fast_math)
	      && op1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for nans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (val = exact_log2 (INTVAL (op1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  if (GET_CODE (op1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE d;
	      jmp_buf handler;
	      int op1is2, op1ism1;

	      if (setjmp (handler))
		return 0;

	      set_float_handler (handler);
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
	      op1is2 = REAL_VALUES_EQUAL (d, dconst2);
	      op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
	      set_float_handler (NULL_PTR);

	      /* x*2 is x+x and x*(-1) is -x */
	      if (op1is2 && GET_MODE (op0) == mode)
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      else if (op1ism1 && GET_MODE (op0) == mode)
		return gen_rtx_NEG (mode, op0);
	    }
	  break;

	case IOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op1;
	  if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return gen_rtx_NOT (mode, op0);
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op0;
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (op1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* In IEEE floating point, 0/x is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_fast_math)
	      && op0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	  /* Change division by a constant into multiplication.  Only do
	     this with -ffast-math until an expert says it is safe in
	     general.  */
	  else if (GET_CODE (op1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
		   && op1 != CONST0_RTX (mode)
		   && flag_fast_math)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
#if defined (REAL_ARITHMETIC)
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
		  return
		    gen_rtx_MULT (mode, op0,
				  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
		}
	    }
#endif
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && exact_log2 (INTVAL (op1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((op0 == const0_rtx || op1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (op1 == const0_rtx)
	    return op0;
	  if (op0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (op1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (op1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (op0);
  arg1 = INTVAL (op1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0)
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0)
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0)
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0)
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to insure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
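
/* For example, (plus (plus x c1) (minus c2 x)) expands to the operand
   list x, c1, c2, -x; the x terms cancel and the constants fold, so
   the rebuilt result is a single constant.  */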
static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;

  memset ((char *) ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	switch (GET_CODE (ops[i]))
	  {
	  case PLUS:
	  case MINUS:
	    if (n_ops == 7)
	      return 0;

	    ops[n_ops] = XEXP (ops[i], 1);
	    negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
	    ops[i] = XEXP (ops[i], 0);
	    input_ops++;
	    changed = 1;
	    break;

	  case NEG:
	    ops[i] = XEXP (ops[i], 0);
	    negs[i] = ! negs[i];
	    changed = 1;
	    break;

	  case CONST:
	    ops[i] = XEXP (ops[i], 0);
	    input_consts++;
	    changed = 1;
	    break;

	  case NOT:
	    /* ~a -> (-a - 1) */
	    if (n_ops != 7)
	      {
		ops[n_ops] = constm1_rtx;
		negs[n_ops++] = negs[i];
		ops[i] = XEXP (ops[i], 0);
		negs[i] = ! negs[i];
		changed = 1;
	      }
	    break;

	  case CONST_INT:
	    if (negs[i])
	      ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
	    break;

	  default:
	    break;
	  }
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  if (ops[i] != 0 && ops[j] != 0
	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
	    {
	      rtx lhs = ops[i], rhs = ops[j];
	      enum rtx_code ncode = PLUS;

	      if (negs[i] && ! negs[j])
		lhs = ops[j], rhs = ops[i], ncode = MINUS;
	      else if (! negs[i] && negs[j])
		ncode = MINUS;

	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
	      if (tem)
		{
		  ops[i] = tem, ops[j] = 0;
		  negs[i] = negs[i] && negs[j];
		  if (GET_CODE (tem) == NEG)
		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;

		  changed = 1;
		}
	    }

      first = 0;
    }

  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
	ops[i] = ops[j], negs[i++] = negs[j];
	if (GET_CODE (ops[j]) == CONST)
	  n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;

  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
  int unordered;
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  /* We may possibly raise an exception while reading the value.  */
  args->unordered = 1;
  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);

  /* Comparisons of Inf versus Inf are ordered.  */
  if (REAL_VALUE_ISNAN (d0)
      || REAL_VALUE_ISNAN (d1))
    return;
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
  args->unordered = 0;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);
  if (flag_fast_math && code == ORDERED)
    return const_true_rtx;

  if (flag_fast_math && code == UNORDERED)
    return const0_rtx;

  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Setup input for check_fold_consts() */
      args.op0 = op0;
      args.op1 = op1;

      if (!do_float_handler(check_fold_consts, (PTR) &args))
	/* We got an exception from check_fold_consts() */
	return 0;

      if (args.unordered)
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      /* Receive output from check_fold_consts() */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (op0);
	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
	}
      else
	{
	  l0u = l0s = INTVAL (op0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (op1);
	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
	}
      else
	{
	  l1u = l1s = INTVAL (op1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
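
      /* Example for the extraction above: a ZERO_EXTRACT of OP1 = 8
	 bits starting at OP2 = 4 from OP0 = 0x1234 (with
	 !BITS_BIG_ENDIAN) shifts right by 4 and masks to 8 bits,
	 yielding 0x23.  */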
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_fast_math)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	       && (! FLOAT_MODE_P (mode) || flag_fast_math)
	       && rtx_equal_p (XEXP (op0, 1), op1)
	       && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp
	    = simplify_relational_operation (GET_CODE (op0), cmp_mode,
					     XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    abort ();

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code;
  enum machine_mode mode;

  mode = GET_MODE (x);
  code = GET_CODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    (GET_MODE (XEXP (x, 0)) != VOIDmode
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    default:
      return NULL;
    }
}
/* Allocate a struct elt_list and fill in its two elements with the
   arguments.  */

static struct elt_list *
new_elt_list (next, elt)
     struct elt_list *next;
     cselib_val *elt;
{
  struct elt_list *el = empty_elt_lists;

  if (el)
    empty_elt_lists = el->next;
  else
    el = (struct elt_list *) obstack_alloc (&cselib_obstack,
					    sizeof (struct elt_list));
  el->next = next;
  el->elt = elt;
  return el;
}
2202 static struct elt_loc_list
*
2203 new_elt_loc_list (next
, loc
)
2204 struct elt_loc_list
*next
;
2207 struct elt_loc_list
*el
= empty_elt_loc_lists
;
2210 empty_elt_loc_lists
= el
->next
;
2212 el
= (struct elt_loc_list
*) obstack_alloc (&cselib_obstack
,
2213 sizeof (struct elt_loc_list
));
2216 el
->setting_insn
= cselib_current_insn
;
2220 /* The elt_list at *PL is no longer needed. Unchain it and free its
2224 unchain_one_elt_list (pl
)
2225 struct elt_list
**pl
;
2227 struct elt_list
*l
= *pl
;
2230 l
->next
= empty_elt_lists
;
2231 empty_elt_lists
= l
;
2234 /* Likewise for elt_loc_lists. */
2237 unchain_one_elt_loc_list (pl
)
2238 struct elt_loc_list
**pl
;
2240 struct elt_loc_list
*l
= *pl
;
2243 l
->next
= empty_elt_loc_lists
;
2244 empty_elt_loc_lists
= l
;
/* Likewise for cselib_vals.  This also frees the addr_list associated with
   V.  */

static void
unchain_one_value (v)
     cselib_val *v;
{
  while (v->addr_list)
    unchain_one_elt_list (&v->addr_list);

  v->u.next_free = empty_vals;
  empty_vals = v;
}
/* Remove all entries from the hash table.  Also used during
   initialization.  If CLEAR_ALL isn't set, then only clear the entries
   which are known to have been used.  */

static void
clear_table (clear_all)
     int clear_all;
{
  unsigned int i;

  if (clear_all)
    for (i = 0; i < cselib_nregs; i++)
      REG_VALUES (i) = 0;
  else
    for (i = 0; i < VARRAY_ACTIVE_SIZE (used_regs); i++)
      REG_VALUES (VARRAY_UINT (used_regs, i)) = 0;

  VARRAY_POP_ALL (used_regs);

  htab_empty (hash_table);
  obstack_free (&cselib_obstack, cselib_startobj);

  empty_vals = 0;
  empty_elt_lists = 0;
  empty_elt_loc_lists = 0;
  n_useless_values = 0;

  next_unknown_value = 0;
}
/* The equality test for our hash table.  The first argument ENTRY is a table
   element (i.e. a cselib_val), while the second arg X is an rtx.  We know
   that all callers of htab_find_slot_with_hash will wrap CONST_INTs into a
   CONST of an appropriate mode.  */

static int
entry_and_rtx_equal_p (entry, x_arg)
     const void *entry, *x_arg;
{
  struct elt_loc_list *l;
  const cselib_val *v = (const cselib_val *) entry;
  rtx x = (rtx) x_arg;
  enum machine_mode mode = GET_MODE (x);

  if (GET_CODE (x) == CONST_INT
      || (mode == VOIDmode && GET_CODE (x) == CONST_DOUBLE))
    abort ();
  if (mode != GET_MODE (v->u.val_rtx))
    return 0;

  /* Unwrap X if necessary.  */
  if (GET_CODE (x) == CONST
      && (GET_CODE (XEXP (x, 0)) == CONST_INT
	  || GET_CODE (XEXP (x, 0)) == CONST_DOUBLE))
    x = XEXP (x, 0);

  /* We don't guarantee that distinct rtx's have different hash values,
     so we need to do a comparison.  */
  for (l = v->locs; l; l = l->next)
    if (rtx_equal_for_cselib_p (l->loc, x))
      return 1;

  return 0;
}
/* The hash function for our hash table.  The value is always computed with
   hash_rtx when adding an element; this function just extracts the hash
   value from a cselib_val structure.  */

static unsigned int
get_value_hash (entry)
     const void *entry;
{
  const cselib_val *v = (const cselib_val *) entry;
  return v->value;
}
/* Return true if X contains a VALUE rtx.  If ONLY_USELESS is set, we
   only return true for values which point to a cselib_val whose value
   element has been set to zero, which implies the cselib_val will be
   removed.  */

static int
references_value_p (x, only_useless)
     rtx x;
     int only_useless;
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (GET_CODE (x) == VALUE
      && (! only_useless || CSELIB_VAL_PTR (x)->locs == 0))
    return 1;

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && references_value_p (XEXP (x, i), only_useless))
	return 1;
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (references_value_p (XVECEXP (x, i, j), only_useless))
	    return 1;
    }

  return 0;
}
/* For all locations found in X, delete locations that reference useless
   values (i.e. values without any location).  Called through
   htab_traverse.  */

static int
discard_useless_locs (x, info)
     void **x;
     void *info ATTRIBUTE_UNUSED;
{
  cselib_val *v = (cselib_val *) *x;
  struct elt_loc_list **p = &v->locs;
  int had_locs = v->locs != 0;

  while (*p)
    {
      if (references_value_p ((*p)->loc, 1))
	unchain_one_elt_loc_list (p);
      else
	p = &(*p)->next;
    }

  if (had_locs && v->locs == 0)
    {
      n_useless_values++;
      values_became_useless = 1;
    }

  return 1;
}
/* If X is a value with no locations, remove it from the hashtable.  */

static int
discard_useless_values (x, info)
     void **x;
     void *info ATTRIBUTE_UNUSED;
{
  cselib_val *v = (cselib_val *) *x;

  if (v->locs == 0)
    {
      htab_clear_slot (hash_table, x);
      unchain_one_value (v);
      n_useless_values--;
    }

  return 1;
}
/* Clean out useless values (i.e. those which no longer have locations
   associated with them) from the hash table.  */

static void
remove_useless_values ()
{
  /* First pass: eliminate locations that reference the value.  That in
     turn can make more values useless, since deleting a location may
     empty some other value's last remaining location, so iterate until
     we reach a fixed point.  */
  do
    {
      values_became_useless = 0;
      htab_traverse (hash_table, discard_useless_locs, 0);
    }
  while (values_became_useless);

  /* Second pass: actually remove the values.  */
  htab_traverse (hash_table, discard_useless_values, 0);

  if (n_useless_values != 0)
    abort ();
}
/* Return nonzero if we can prove that X and Y contain the same value, taking
   our gathered information into account.  */

int
rtx_equal_for_cselib_p (x, y)
     rtx x, y;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  if (GET_CODE (x) == REG || GET_CODE (x) == MEM)
    {
      cselib_val *e = cselib_lookup (x, GET_MODE (x), 0);

      if (e)
	x = e->u.val_rtx;
    }

  if (GET_CODE (y) == REG || GET_CODE (y) == MEM)
    {
      cselib_val *e = cselib_lookup (y, GET_MODE (y), 0);

      if (e)
	y = e->u.val_rtx;
    }

  if (x == y)
    return 1;

  if (GET_CODE (x) == VALUE && GET_CODE (y) == VALUE)
    return CSELIB_VAL_PTR (x) == CSELIB_VAL_PTR (y);

  if (GET_CODE (x) == VALUE)
    {
      cselib_val *e = CSELIB_VAL_PTR (x);
      struct elt_loc_list *l;

      for (l = e->locs; l; l = l->next)
	{
	  rtx t = l->loc;

	  /* Avoid infinite recursion.  */
	  if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
	    continue;
	  else if (rtx_equal_for_cselib_p (t, y))
	    return 1;
	}

      return 0;
    }

  if (GET_CODE (y) == VALUE)
    {
      cselib_val *e = CSELIB_VAL_PTR (y);
      struct elt_loc_list *l;

      for (l = e->locs; l; l = l->next)
	{
	  rtx t = l->loc;

	  /* Avoid infinite recursion.  */
	  if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
	    continue;
	  else if (rtx_equal_for_cselib_p (x, t))
	    return 1;
	}

      return 0;
    }

  if (GET_CODE (x) != GET_CODE (y) || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* This won't be handled correctly by the code below.  */
  if (GET_CODE (x) == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);

  code = GET_CODE (x);
  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
	{
	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'n':
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'V':
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (! rtx_equal_for_cselib_p (XVECEXP (x, i, j),
					  XVECEXP (y, i, j)))
	      return 0;
	  break;

	case 'e':
	  if (! rtx_equal_for_cselib_p (XEXP (x, i), XEXP (y, i)))
	    return 0;
	  break;

	case 'S':
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'u':
	  /* These are just backpointers, so they don't matter.  */
	  break;

	case '0':
	case 't':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }

  return 1;
}
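/* For example, after processing (set (reg:SI 3) (mem:SI (reg:SI 4))),
   (reg:SI 3) and (mem:SI (reg:SI 4)) map to the same cselib_val, so
   rtx_equal_for_cselib_p returns 1 for that pair even though a plain
   structural comparison such as rtx_equal_p would return 0.  */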
/* We need to pass down the mode of constants through the hash table
   functions.  For that purpose, wrap them in a CONST of the appropriate
   mode.  */

static rtx
wrap_constant (mode, x)
     enum machine_mode mode;
     rtx x;
{
  if (GET_CODE (x) != CONST_INT
      && (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != VOIDmode))
    return x;
  if (mode == VOIDmode)
    abort ();
  return gen_rtx_CONST (mode, x);
}
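/* E.g. wrap_constant (SImode, (const_int 42)) yields
   (const:SI (const_int 42)), while anything that already carries a mode
   of its own passes through unchanged.  */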
/* Hash an rtx.  Return 0 if we couldn't hash the rtx.
   For registers and memory locations, we look up their cselib_val structure
   and return its VALUE element.
   Possible reasons for return 0 are: the object is volatile, or we couldn't
   find a register or memory location in the table and CREATE is zero.  If
   CREATE is nonzero, table elts are created for regs and mem.
   MODE is used in hashing for CONST_INTs only;
   otherwise the mode of X is used.  */

static unsigned int
hash_rtx (x, mode, create)
     rtx x;
     enum machine_mode mode;
     int create;
{
  cselib_val *e;
  int i, j;
  enum rtx_code code;
  const char *fmt;
  unsigned int hash = 0;

  /* repeat is used to turn tail-recursion into iteration.  */
 repeat:
  code = GET_CODE (x);
  hash += (unsigned) code + (unsigned) GET_MODE (x);

  switch (code)
    {
    case MEM:
    case REG:
      e = cselib_lookup (x, GET_MODE (x), create);
      if (! e)
	return 0;

      return e->value;

    case CONST_INT:
      hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + INTVAL (x);
      return hash ? hash : CONST_INT;

    case CONST_DOUBLE:
      /* This is like the general case, except that it only counts
	 the integers representing the constant.  */
      hash += (unsigned) code + (unsigned) GET_MODE (x);
      if (GET_MODE (x) != VOIDmode)
	for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
	  hash += XWINT (x, i);
      else
	hash += ((unsigned) CONST_DOUBLE_LOW (x)
		 + (unsigned) CONST_DOUBLE_HIGH (x));
      return hash ? hash : CONST_DOUBLE;

      /* Assume there is only one rtx object for any given label.  */
    case LABEL_REF:
      hash
	+= ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
      return hash ? hash : LABEL_REF;

    case SYMBOL_REF:
      hash
	+= ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
      return hash ? hash : SYMBOL_REF;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PC:
    case CC0:
    case CALL:
    case UNSPEC_VOLATILE:
      return 0;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 0;

      break;

    default:
      break;
    }

  i = GET_RTX_LENGTH (code) - 1;
  fmt = GET_RTX_FORMAT (code);
  for (; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  rtx tem = XEXP (x, i);
	  unsigned int tem_hash;

	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = tem;
	      goto repeat;
	    }

	  tem_hash = hash_rtx (tem, 0, create);
	  if (tem_hash == 0)
	    return 0;

	  hash += tem_hash;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  {
	    unsigned int tem_hash = hash_rtx (XVECEXP (x, i, j), 0, create);

	    if (tem_hash == 0)
	      return 0;

	    hash += tem_hash;
	  }
      else if (fmt[i] == 's')
	{
	  const unsigned char *p = (const unsigned char *) XSTR (x, i);

	  if (p)
	    while (*p)
	      hash += *p++;
	}
      else if (fmt[i] == 'i')
	hash += XINT (x, i);
      else if (fmt[i] == '0' || fmt[i] == 't')
	/* Unused.  */;
      else
	abort ();
    }

  return hash ? hash : 1 + GET_CODE (x);
}
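/* Zero is reserved above as the "couldn't hash" indication, so every
   successful return remaps a hash that happens to come out 0 to some
   nonzero constant, e.g. "return hash ? hash : CONST_INT".  */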
/* Create a new value structure for VALUE and initialize it.  The mode of the
   value is MODE.  */

static cselib_val *
new_cselib_val (value, mode)
     unsigned int value;
     enum machine_mode mode;
{
  cselib_val *e = empty_vals;

  if (e)
    empty_vals = e->u.next_free;
  else
    e = (cselib_val *) obstack_alloc (&cselib_obstack, sizeof (cselib_val));

  if (value == 0)
    abort ();

  e->value = value;
  e->u.val_rtx = gen_rtx_VALUE (mode);
  CSELIB_VAL_PTR (e->u.val_rtx) = e;
  e->addr_list = 0;
  e->locs = 0;

  return e;
}
/* ADDR_ELT is a value that is used as address.  MEM_ELT is the value that
   contains the data at this address.  X is a MEM that represents the
   value.  Update the two value structures to represent this situation.  */

static void
add_mem_for_addr (addr_elt, mem_elt, x)
     cselib_val *addr_elt, *mem_elt;
     rtx x;
{
  rtx new;
  struct elt_loc_list *l;

  /* Avoid duplicates.  */
  for (l = mem_elt->locs; l; l = l->next)
    if (GET_CODE (l->loc) == MEM
	&& CSELIB_VAL_PTR (XEXP (l->loc, 0)) == addr_elt)
      return;

  new = gen_rtx_MEM (GET_MODE (x), addr_elt->u.val_rtx);
  MEM_COPY_ATTRIBUTES (new, x);

  addr_elt->addr_list = new_elt_list (addr_elt->addr_list, mem_elt);
  mem_elt->locs = new_elt_loc_list (mem_elt->locs, new);
}
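/* E.g. if X is (mem:SI (reg:SI 1)) and (reg:SI 1) is known to hold the
   value numbered 5 (number hypothetical), MEM_ELT gains the location
   (mem:SI (value:SI 5)) and value 5's addr_list gains a backpointer to
   MEM_ELT.  */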
/* Subroutine of cselib_lookup.  Return a value for X, which is a MEM rtx.
   If CREATE, make a new one if we haven't seen it before.  */

static cselib_val *
cselib_lookup_mem (x, create)
     rtx x;
     int create;
{
  enum machine_mode mode = GET_MODE (x);
  void **slot;
  cselib_val *addr;
  cselib_val *mem_elt;
  struct elt_list *l;

  if (MEM_VOLATILE_P (x) || mode == BLKmode
      || (FLOAT_MODE_P (mode) && flag_float_store))
    return 0;

  /* Look up the value for the address.  */
  addr = cselib_lookup (XEXP (x, 0), mode, create);
  if (! addr)
    return 0;

  /* Find a value that describes a value of our mode at that address.  */
  for (l = addr->addr_list; l; l = l->next)
    if (GET_MODE (l->elt->u.val_rtx) == mode)
      return l->elt;

  if (! create)
    return 0;

  mem_elt = new_cselib_val (++next_unknown_value, mode);
  add_mem_for_addr (addr, mem_elt, x);
  slot = htab_find_slot_with_hash (hash_table, wrap_constant (mode, x),
				   mem_elt->value, INSERT);
  *slot = mem_elt;
  return mem_elt;
}
/* Walk rtx X and replace all occurrences of REG and MEM subexpressions
   with VALUE expressions.  This way, it becomes independent of changes
   to registers and memory.
   X isn't actually modified; if modifications are needed, new rtl is
   allocated.  However, the return value can share rtl with X.  */

static rtx
cselib_subst_to_values (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  cselib_val *e;
  struct elt_list *l;
  rtx copy = x;
  int i;

  switch (code)
    {
    case REG:
      for (l = REG_VALUES (REGNO (x)); l; l = l->next)
	if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
	  return l->elt->u.val_rtx;

      abort ();

    case MEM:
      e = cselib_lookup_mem (x, 0);
      if (! e)
	abort ();

      return e->u.val_rtx;

      /* CONST_DOUBLEs must be special-cased here so that we won't try to
	 look up the CONST_DOUBLE_MEM inside.  */
    case CONST_DOUBLE:
    case CONST_INT:
      return x;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  rtx t = cselib_subst_to_values (XEXP (x, i));

	  if (t != XEXP (x, i) && x == copy)
	    copy = shallow_copy_rtx (x);

	  XEXP (copy, i) = t;
	}
      else if (fmt[i] == 'E')
	{
	  int j, k;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    {
	      rtx t = cselib_subst_to_values (XVECEXP (x, i, j));

	      if (t != XVECEXP (x, i, j) && XVEC (x, i) == XVEC (copy, i))
		{
		  if (x == copy)
		    copy = shallow_copy_rtx (x);

		  XVEC (copy, i) = rtvec_alloc (XVECLEN (x, i));
		  for (k = 0; k < j; k++)
		    XVECEXP (copy, i, k) = XVECEXP (x, i, k);
		}

	      XVECEXP (copy, i, j) = t;
	    }
	}
    }

  return copy;
}
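/* Example: (plus:SI (reg:SI 1) (mem:SI (reg:SI 2))) might come back as
   (plus:SI (value:SI 3) (value:SI 7)) (value numbers hypothetical);
   only as much of X as necessary is copied to splice in the VALUEs.  */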
/* Look up the rtl expression X in our tables and return the value it has.
   If CREATE is zero, we return NULL if we don't know the value.  Otherwise,
   we create a new one if possible, using mode MODE if X doesn't have a mode
   (i.e. because it's a constant).  */

cselib_val *
cselib_lookup (x, mode, create)
     rtx x;
     enum machine_mode mode;
     int create;
{
  void **slot;
  cselib_val *e;
  unsigned int hashval;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  if (GET_CODE (x) == VALUE)
    return CSELIB_VAL_PTR (x);

  if (GET_CODE (x) == REG)
    {
      struct elt_list *l;
      unsigned int i = REGNO (x);

      for (l = REG_VALUES (i); l; l = l->next)
	if (mode == GET_MODE (l->elt->u.val_rtx))
	  return l->elt;

      if (! create)
	return 0;

      e = new_cselib_val (++next_unknown_value, GET_MODE (x));
      e->locs = new_elt_loc_list (e->locs, x);
      if (REG_VALUES (i) == 0)
	VARRAY_PUSH_UINT (used_regs, i);
      REG_VALUES (i) = new_elt_list (REG_VALUES (i), e);
      slot = htab_find_slot_with_hash (hash_table, x, e->value, INSERT);
      *slot = e;
      return e;
    }

  if (GET_CODE (x) == MEM)
    return cselib_lookup_mem (x, create);

  hashval = hash_rtx (x, mode, create);
  /* Can't even create if hashing is not possible.  */
  if (! hashval)
    return 0;

  slot = htab_find_slot_with_hash (hash_table, wrap_constant (mode, x),
				   hashval, create ? INSERT : NO_INSERT);
  if (slot == 0)
    return 0;

  e = (cselib_val *) *slot;
  if (e)
    return e;

  e = new_cselib_val (hashval, mode);

  /* We have to fill the slot before calling cselib_subst_to_values:
     the hash table is inconsistent until we do so, and
     cselib_subst_to_values will need to do lookups.  */
  *slot = (void *) e;
  e->locs = new_elt_loc_list (e->locs, cselib_subst_to_values (x));
  return e;
}
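#if 0
/* A minimal sketch of how a client might use cselib_lookup to test two
   operands for equality (hypothetical helper, not something this file
   provides):  */

static int
operands_have_same_value_p (a, b)
     rtx a, b;
{
  cselib_val *va = cselib_lookup (a, GET_MODE (a), 0);
  cselib_val *vb = cselib_lookup (b, GET_MODE (b), 0);

  return va != 0 && va == vb;
}
#endif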
/* Invalidate any entries in reg_values that overlap REGNO.  This is called
   if REGNO is changing.  MODE is the mode of the assignment to REGNO, which
   is used to determine how many hard registers are being changed.  If MODE
   is VOIDmode, then only REGNO is being changed; this is used when
   invalidating call clobbered registers across a call.  */

static void
cselib_invalidate_regno (regno, mode)
     unsigned int regno;
     enum machine_mode mode;
{
  unsigned int endregno;
  unsigned int i;

  /* If we see pseudos after reload, something is _wrong_.  */
  if (reload_completed && regno >= FIRST_PSEUDO_REGISTER
      && reg_renumber[regno] >= 0)
    abort ();

  /* Determine the range of registers that must be invalidated.  For
     pseudos, only REGNO is affected.  For hard regs, we must take MODE
     into account, and we must also invalidate lower register numbers
     if they contain values that overlap REGNO.  */
  endregno = regno + 1;
  if (regno < FIRST_PSEUDO_REGISTER && mode != VOIDmode)
    endregno = regno + HARD_REGNO_NREGS (regno, mode);

  for (i = 0; i < endregno; i++)
    {
      struct elt_list **l = &REG_VALUES (i);

      /* Go through all known values for this reg; if it overlaps the range
	 we're invalidating, remove the value.  */
      while (*l)
	{
	  cselib_val *v = (*l)->elt;
	  struct elt_loc_list **p;
	  unsigned int this_last = i;

	  if (i < FIRST_PSEUDO_REGISTER)
	    this_last += HARD_REGNO_NREGS (i, GET_MODE (v->u.val_rtx)) - 1;

	  if (this_last < regno)
	    {
	      l = &(*l)->next;
	      continue;
	    }

	  /* We have an overlap.  */
	  unchain_one_elt_list (l);

	  /* Now, we clear the mapping from value to reg.  It must exist, so
	     this code will crash intentionally if it doesn't.  */
	  for (p = &v->locs; ; p = &(*p)->next)
	    {
	      rtx x = (*p)->loc;

	      if (GET_CODE (x) == REG && REGNO (x) == i)
		{
		  unchain_one_elt_loc_list (p);
		  break;
		}
	    }

	  if (v->locs == 0)
	    n_useless_values++;
	}
    }
}
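/* Example of the overlap rule: on a target where (reg:DI 0) occupies hard
   registers 0 and 1, an assignment to (reg:SI 1) must also invalidate any
   value recorded for (reg:DI 0); that is why the loop above scans from
   register 0 and compares this_last against REGNO.  */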
/* The memory at address MEM_BASE is being changed.
   Return whether this change will invalidate VAL.  */

static int
cselib_mem_conflict_p (mem_base, val)
     rtx mem_base;
     rtx val;
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  code = GET_CODE (val);
  switch (code)
    {
      /* Get rid of a few simple cases quickly.  */
    case REG:
    case PC:
    case CC0:
    case SCRATCH:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case MEM:
      if (GET_MODE (mem_base) == BLKmode
	  || GET_MODE (val) == BLKmode
	  || anti_dependence (val, mem_base))
	return 1;

      /* The address may contain nested MEMs.  */
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (cselib_mem_conflict_p (mem_base, XEXP (val, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (val, i); j++)
	  if (cselib_mem_conflict_p (mem_base, XVECEXP (val, i, j)))
	    return 1;
    }

  return 0;
}
/* For the value found in SLOT, walk its locations to determine if any overlap
   INFO (which is a MEM rtx).  */

static int
cselib_invalidate_mem_1 (slot, info)
     void **slot;
     void *info;
{
  cselib_val *v = (cselib_val *) *slot;
  rtx mem_rtx = (rtx) info;
  struct elt_loc_list **p = &v->locs;
  int had_locs = v->locs != 0;

  while (*p)
    {
      rtx x = (*p)->loc;
      cselib_val *addr;
      struct elt_list **mem_chain;

      /* MEMs may occur in locations only at the top level; below
	 that every MEM or REG is substituted by its VALUE.  */
      if (GET_CODE (x) != MEM
	  || ! cselib_mem_conflict_p (mem_rtx, x))
	{
	  p = &(*p)->next;
	  continue;
	}

      /* This one overlaps.  */
      /* We must have a mapping from this MEM's address to the
	 value (E).  Remove that, too.  */
      addr = cselib_lookup (XEXP (x, 0), VOIDmode, 0);
      mem_chain = &addr->addr_list;
      for (;;)
	{
	  if ((*mem_chain)->elt == v)
	    {
	      unchain_one_elt_list (mem_chain);
	      break;
	    }

	  mem_chain = &(*mem_chain)->next;
	}

      unchain_one_elt_loc_list (p);
    }

  if (had_locs && v->locs == 0)
    n_useless_values++;

  return 1;
}
/* Invalidate any locations in the table which are changed because of a
   store to MEM_RTX.  If this is called because of a non-const call
   instruction, MEM_RTX is (mem:BLK const0_rtx).  */

static void
cselib_invalidate_mem (mem_rtx)
     rtx mem_rtx;
{
  htab_traverse (hash_table, cselib_invalidate_mem_1, mem_rtx);
}
/* Invalidate DEST, which is being assigned to or clobbered.  The second and
   the third parameter exist so that this function can be passed to
   note_stores; they are ignored.  */

static void
cselib_invalidate_rtx (dest, ignore, data)
     rtx dest;
     rtx ignore ATTRIBUTE_UNUSED;
     void *data ATTRIBUTE_UNUSED;
{
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SIGN_EXTRACT
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG)
    dest = XEXP (dest, 0);

  if (GET_CODE (dest) == REG)
    cselib_invalidate_regno (REGNO (dest), GET_MODE (dest));
  else if (GET_CODE (dest) == MEM)
    cselib_invalidate_mem (dest);

  /* Some machines don't define AUTO_INC_DEC, but they still use push
     instructions.  We need to catch that case here in order to
     invalidate the stack pointer correctly.  Note that invalidating
     the stack pointer is different from invalidating DEST.  */
  if (push_operand (dest, GET_MODE (dest)))
    cselib_invalidate_rtx (stack_pointer_rtx, NULL_RTX, NULL);
}
/* Record the result of a SET instruction.  DEST is being set; the source
   contains the value described by SRC_ELT.  If DEST is a MEM, DEST_ADDR_ELT
   describes its address.  */

static void
cselib_record_set (dest, src_elt, dest_addr_elt)
     rtx dest;
     cselib_val *src_elt, *dest_addr_elt;
{
  int dreg = GET_CODE (dest) == REG ? (int) REGNO (dest) : -1;

  if (src_elt == 0 || side_effects_p (dest))
    return;

  if (dreg >= 0)
    {
      if (REG_VALUES (dreg) == 0)
	VARRAY_PUSH_UINT (used_regs, dreg);

      REG_VALUES (dreg) = new_elt_list (REG_VALUES (dreg), src_elt);
      if (src_elt->locs == 0)
	n_useless_values--;

      src_elt->locs = new_elt_loc_list (src_elt->locs, dest);
    }
  else if (GET_CODE (dest) == MEM && dest_addr_elt != 0)
    {
      if (src_elt->locs == 0)
	n_useless_values--;

      add_mem_for_addr (dest_addr_elt, src_elt, dest);
    }
}
/* Describe a single set that is part of an insn.  */
struct set
{
  rtx src;
  rtx dest;
  cselib_val *src_elt;
  cselib_val *dest_addr_elt;
};

/* There is no good way to determine how many elements there can be
   in a PARALLEL.  Since it's fairly cheap, use a really large number.  */
#define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
/* Record the effects of any sets in INSN.  */

static void
cselib_record_sets (insn)
     rtx insn;
{
  int n_sets = 0;
  int i;
  struct set sets[MAX_SETS];
  rtx body = PATTERN (insn);

  /* Find all sets.  */
  if (GET_CODE (body) == SET)
    {
      sets[0].src = SET_SRC (body);
      sets[0].dest = SET_DEST (body);
      n_sets = 1;
    }
  else if (GET_CODE (body) == PARALLEL)
    {
      /* Look through the PARALLEL and record the values being
	 set, if possible.  Also handle any CLOBBERs.  */
      for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
	{
	  rtx x = XVECEXP (body, 0, i);

	  if (GET_CODE (x) == SET)
	    {
	      sets[n_sets].src = SET_SRC (x);
	      sets[n_sets].dest = SET_DEST (x);
	      n_sets++;
	    }
	}
    }

  /* Look up the values that are read.  Do this before invalidating the
     locations that are written.  */
  for (i = 0; i < n_sets; i++)
    {
      rtx dest = sets[i].dest;

      /* A STRICT_LOW_PART can be ignored; we'll record the equivalence for
	 the low part after invalidating any knowledge about larger modes.  */
      if (GET_CODE (sets[i].dest) == STRICT_LOW_PART)
	sets[i].dest = dest = XEXP (dest, 0);

      /* We don't know how to record anything but REG or MEM.  */
      if (GET_CODE (dest) == REG || GET_CODE (dest) == MEM)
	{
	  sets[i].src_elt = cselib_lookup (sets[i].src, GET_MODE (dest), 1);
	  if (GET_CODE (dest) == MEM)
	    sets[i].dest_addr_elt = cselib_lookup (XEXP (dest, 0), Pmode, 1);
	  else
	    sets[i].dest_addr_elt = 0;
	}
    }

  /* Invalidate all locations written by this insn.  Note that the elts we
     looked up in the previous loop aren't affected, just some of their
     locations may go away.  */
  note_stores (body, cselib_invalidate_rtx, NULL);

  /* Now enter the equivalences in our tables.  */
  for (i = 0; i < n_sets; i++)
    {
      rtx dest = sets[i].dest;

      if (GET_CODE (dest) == REG || GET_CODE (dest) == MEM)
	cselib_record_set (dest, sets[i].src_elt, sets[i].dest_addr_elt);
    }
}
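/* Why the lookup/invalidate ordering above matters: for an insn like
   (set (reg:SI 1) (plus:SI (reg:SI 1) (const_int 4))), the PLUS is
   looked up (and given a value in terms of reg 1's old contents) before
   reg 1 itself is invalidated, so the recorded equivalence is correct.  */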
/* Record the effects of INSN.  */

void
cselib_process_insn (insn)
     rtx insn;
{
  int i;
  rtx x;

  cselib_current_insn = insn;

  /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp.  */
  if (GET_CODE (insn) == CODE_LABEL
      || (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
      || (GET_CODE (insn) == INSN
	  && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
	  && MEM_VOLATILE_P (PATTERN (insn))))
    {
      clear_table (0);
      return;
    }

  if (! INSN_P (insn))
    {
      cselib_current_insn = 0;
      return;
    }

  /* If this is a call instruction, forget anything stored in a
     call clobbered register, or, if this is not a const call, in
     memory.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (call_used_regs[i])
	  cselib_invalidate_regno (i, VOIDmode);

      if (! CONST_CALL_P (insn))
	cselib_invalidate_mem (callmem);
    }

  cselib_record_sets (insn);

  /* Clobber any registers which appear in REG_INC notes.  We
     could keep track of the changes to their values, but it is
     unlikely to help.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    if (REG_NOTE_KIND (x) == REG_INC)
      cselib_invalidate_rtx (XEXP (x, 0), NULL_RTX, NULL);

  /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
     after we have processed the insn.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (x = CALL_INSN_FUNCTION_USAGE (insn); x; x = XEXP (x, 1))
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	cselib_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX, NULL);

  cselib_current_insn = 0;

  if (n_useless_values > MAX_USELESS_VALUES)
    remove_useless_values ();
}
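/* The MAX_USELESS_VALUES check implements a simple batching heuristic:
   values that lose their last location are merely counted as processing
   proceeds, and the table is swept in one remove_useless_values call
   once enough garbage has accumulated.  */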
/* Make sure our varrays are big enough.  Not called from any cselib routines;
   it must be called by the user if it allocated new registers.  */

void
cselib_update_varray_sizes ()
{
  unsigned int nregs = max_reg_num ();

  if (nregs == cselib_nregs)
    return;

  cselib_nregs = nregs;
  VARRAY_GROW (reg_values, nregs);
  VARRAY_GROW (used_regs, nregs);
}
/* Initialize cselib for one pass.  The caller must also call
   init_alias_analysis.  */

void
cselib_init ()
{
  /* These are only created once.  */
  if (! callmem)
    {
      gcc_obstack_init (&cselib_obstack);
      cselib_startobj = obstack_alloc (&cselib_obstack, 0);

      callmem = gen_rtx_MEM (BLKmode, const0_rtx);
      ggc_add_rtx_root (&callmem, 1);
    }

  cselib_nregs = max_reg_num ();
  VARRAY_ELT_LIST_INIT (reg_values, cselib_nregs, "reg_values");
  VARRAY_UINT_INIT (used_regs, cselib_nregs, "used_regs");
  hash_table = htab_create (31, get_value_hash, entry_and_rtx_equal_p, NULL);
  clear_table (1);
}
/* Called when the current user is done with cselib.  */

void
cselib_finish ()
{
  clear_table (0);
  VARRAY_FREE (reg_values);
  VARRAY_FREE (used_regs);
  htab_delete (hash_table);
}
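#if 0
/* A minimal sketch of a complete cselib session, under the assumptions
   stated in the comments above (a hypothetical pass; the alias-analysis
   entry points live elsewhere in the compiler):  */

static void
example_pass (first)
     rtx first;
{
  rtx insn;

  init_alias_analysis ();
  cselib_init ();

  for (insn = first; insn; insn = NEXT_INSN (insn))
    cselib_process_insn (insn);

  cselib_finish ();
  end_alias_analysis ();
}
#endif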