1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
47 virtual regs here because the simplify_*_operation routines are called
48 by integrate.c, which is called before virtual register instantiation.
50 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
51 a header file so that their definitions can be shared with the
52 simplification routines in simplify-rtx.c. Until then, do not
53 change these macros without also changing the copy in simplify-rtx.c. */
/* Note: in the (PLUS base const_int) case below, the base register must
   be tested via XEXP (X, 0); X itself is a PLUS there, so comparing X
   against arg_pointer_rtx could never match.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
69 /* Similar, but also allows reference to the stack pointer.
71 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
72 arg_pointer_rtx by itself is nonzero, because on at least one machine,
73 the i960, the arg pointer is zero when it is unused. */
/* Note: in the first (PLUS base const_int) clause below, the base
   register must be tested via XEXP (X, 0); X itself is a PLUS there,
   so comparing X against arg_pointer_rtx could never match.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
95 /* Much code operates on (low, high) pairs; the low value is an
96 unsigned wide int, the high value a signed wide int. We
97 occasionally need to sign extend from low to high as if low were a
98 signed wide int.  */
/* Yield the high half that sign extends LOW: -1 if LOW is negative when
   interpreted as a HOST_WIDE_INT, 0 otherwise.  The argument is
   parenthesized so that an expression argument containing operators of
   lower precedence than a cast still expands correctly.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
102 static rtx simplify_plus_minus
PARAMS ((enum rtx_code
,
103 enum machine_mode
, rtx
, rtx
));
104 static void check_fold_consts
PARAMS ((PTR
));
105 static int entry_and_rtx_equal_p
PARAMS ((const void *, const void *));
106 static unsigned int get_value_hash
PARAMS ((const void *));
107 static struct elt_list
*new_elt_list
PARAMS ((struct elt_list
*,
109 static struct elt_loc_list
*new_elt_loc_list
PARAMS ((struct elt_loc_list
*,
111 static void unchain_one_value
PARAMS ((cselib_val
*));
112 static void unchain_one_elt_list
PARAMS ((struct elt_list
**));
113 static void unchain_one_elt_loc_list
PARAMS ((struct elt_loc_list
**));
114 static void clear_table
PARAMS ((void));
115 static int discard_useless_locs
PARAMS ((void **, void *));
116 static int discard_useless_values
PARAMS ((void **, void *));
117 static void remove_useless_values
PARAMS ((void));
118 static unsigned int hash_rtx
PARAMS ((rtx
, enum machine_mode
, int));
119 static cselib_val
*new_cselib_val
PARAMS ((unsigned int,
121 static void add_mem_for_addr
PARAMS ((cselib_val
*, cselib_val
*,
123 static cselib_val
*cselib_lookup_mem
PARAMS ((rtx
, int));
124 static rtx cselib_subst_to_values
PARAMS ((rtx
));
125 static void cselib_invalidate_regno
PARAMS ((unsigned int,
127 static int cselib_mem_conflict_p
PARAMS ((rtx
, rtx
));
128 static int cselib_invalidate_mem_1
PARAMS ((void **, void *));
129 static void cselib_invalidate_mem
PARAMS ((rtx
));
130 static void cselib_invalidate_rtx
PARAMS ((rtx
, rtx
, void *));
131 static void cselib_record_set
PARAMS ((rtx
, cselib_val
*,
133 static void cselib_record_sets
PARAMS ((rtx
));
135 /* There are three ways in which cselib can look up an rtx:
136 - for a REG, the reg_values table (which is indexed by regno) is used
137 - for a MEM, we recursively look up its address and then follow the
138 addr_list of that value
139 - for everything else, we compute a hash value and go through the hash
140 table. Since different rtx's can still have the same hash value,
141 this involves walking the table entries for a given value and comparing
142 the locations of the entries with the rtx we are looking up. */
144 /* A table that enables us to look up elts by their value. */
145 static htab_t hash_table
;
147 /* This is a global so we don't have to pass this through every function.
148 It is used in new_elt_loc_list to set SETTING_INSN. */
149 static rtx cselib_current_insn
;
151 /* Every new unknown value gets a unique number. */
152 static unsigned int next_unknown_value
;
154 /* The number of registers we had when the varrays were last resized. */
155 static unsigned int cselib_nregs
;
157 /* Count values without known locations. Whenever this grows too big, we
158 remove these useless values from the table. */
159 static int n_useless_values
;
161 /* Number of useless values before we remove them from the hash table. */
162 #define MAX_USELESS_VALUES 32
164 /* This table maps from register number to values. It does not contain
165 pointers to cselib_val structures, but rather elt_lists. The purpose is
166 to be able to refer to the same register in different modes. */
167 static varray_type reg_values
;
168 #define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))
170 /* We pass this to cselib_invalidate_mem to invalidate all of
171 memory for a non-const call instruction. */
174 /* Memory for our structures is allocated from this obstack. */
175 static struct obstack cselib_obstack
;
177 /* Used to quickly free all memory. */
178 static char *cselib_startobj
;
180 /* Caches for unused structures. */
181 static cselib_val
*empty_vals
;
182 static struct elt_list
*empty_elt_lists
;
183 static struct elt_loc_list
*empty_elt_loc_lists
;
185 /* Set by discard_useless_locs if it deleted the last location of any
186 value.  */
187 static int values_became_useless
;
189 /* Make a binary operation by properly ordering the operands and
190 seeing if the expression folds. */
193 simplify_gen_binary (code
, mode
, op0
, op1
)
195 enum machine_mode mode
;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code
) == 'c'
202 && ((CONSTANT_P (op0
) && GET_CODE (op1
) != CONST_INT
)
203 || (GET_RTX_CLASS (GET_CODE (op0
)) == 'o'
204 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')
205 || (GET_CODE (op0
) == SUBREG
206 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0
))) == 'o'
207 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')))
208 tem
= op0
, op0
= op1
, op1
= tem
;
210 /* If this simplifies, do it. */
211 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
216 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
217 just form the operation. */
219 if (code
== PLUS
&& GET_CODE (op1
) == CONST_INT
220 && GET_MODE (op0
) != VOIDmode
)
221 return plus_constant (op0
, INTVAL (op1
));
222 else if (code
== MINUS
&& GET_CODE (op1
) == CONST_INT
223 && GET_MODE (op0
) != VOIDmode
)
224 return plus_constant (op0
, - INTVAL (op1
));
226 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
229 /* Try to simplify a unary operation CODE whose output mode is to be
230 MODE with input operand OP whose mode was originally OP_MODE.
231 Return zero if no simplification can be made. */
234 simplify_unary_operation (code
, mode
, op
, op_mode
)
236 enum machine_mode mode
;
238 enum machine_mode op_mode
;
240 unsigned int width
= GET_MODE_BITSIZE (mode
);
242 /* The order of these tests is critical so that, for example, we don't
243 check the wrong mode (input vs. output) for a conversion operation,
244 such as FIX. At some point, this should be simplified. */
246 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
248 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
249 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
251 HOST_WIDE_INT hv
, lv
;
254 if (GET_CODE (op
) == CONST_INT
)
255 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
257 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
259 #ifdef REAL_ARITHMETIC
260 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
265 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
266 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
267 d
+= (double) (unsigned HOST_WIDE_INT
) (~ lv
);
273 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
274 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
275 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
277 #endif /* REAL_ARITHMETIC */
278 d
= real_value_truncate (mode
, d
);
279 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
281 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
282 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
284 HOST_WIDE_INT hv
, lv
;
287 if (GET_CODE (op
) == CONST_INT
)
288 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
290 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
292 if (op_mode
== VOIDmode
)
294 /* We don't know how to interpret negative-looking numbers in
295 this case, so don't try to fold those. */
299 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
302 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
304 #ifdef REAL_ARITHMETIC
305 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
308 d
= (double) (unsigned HOST_WIDE_INT
) hv
;
309 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
310 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
311 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
312 #endif /* REAL_ARITHMETIC */
313 d
= real_value_truncate (mode
, d
);
314 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
318 if (GET_CODE (op
) == CONST_INT
319 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
321 register HOST_WIDE_INT arg0
= INTVAL (op
);
322 register HOST_WIDE_INT val
;
335 val
= (arg0
>= 0 ? arg0
: - arg0
);
339 /* Don't use ffs here. Instead, get low order bit and then its
340 number. If arg0 is zero, this will return 0, as desired. */
341 arg0
&= GET_MODE_MASK (mode
);
342 val
= exact_log2 (arg0
& (- arg0
)) + 1;
350 if (op_mode
== VOIDmode
)
352 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
354 /* If we were really extending the mode,
355 we would have to distinguish between zero-extension
356 and sign-extension. */
357 if (width
!= GET_MODE_BITSIZE (op_mode
))
361 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
362 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
368 if (op_mode
== VOIDmode
)
370 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
372 /* If we were really extending the mode,
373 we would have to distinguish between zero-extension
374 and sign-extension. */
375 if (width
!= GET_MODE_BITSIZE (op_mode
))
379 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
382 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
384 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
385 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
398 val
= trunc_int_for_mode (val
, mode
);
400 return GEN_INT (val
);
403 /* We can do some operations on integer CONST_DOUBLEs. Also allow
404 for a DImode operation on a CONST_INT. */
405 else if (GET_MODE (op
) == VOIDmode
&& width
<= HOST_BITS_PER_INT
* 2
406 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
408 unsigned HOST_WIDE_INT l1
, lv
;
409 HOST_WIDE_INT h1
, hv
;
411 if (GET_CODE (op
) == CONST_DOUBLE
)
412 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
414 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
424 neg_double (l1
, h1
, &lv
, &hv
);
429 neg_double (l1
, h1
, &lv
, &hv
);
437 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
439 lv
= exact_log2 (l1
& (-l1
)) + 1;
443 /* This is just a change-of-mode, so do nothing. */
448 if (op_mode
== VOIDmode
449 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
453 lv
= l1
& GET_MODE_MASK (op_mode
);
457 if (op_mode
== VOIDmode
458 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
462 lv
= l1
& GET_MODE_MASK (op_mode
);
463 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
464 && (lv
& ((HOST_WIDE_INT
) 1
465 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
466 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
468 hv
= HWI_SIGN_EXTEND (lv
);
479 return immed_double_const (lv
, hv
, mode
);
482 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
483 else if (GET_CODE (op
) == CONST_DOUBLE
484 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
490 if (setjmp (handler
))
491 /* There used to be a warning here, but that is inadvisable.
492 People may want to cause traps, and the natural way
493 to do it should not get a warning. */
496 set_float_handler (handler
);
498 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
503 d
= REAL_VALUE_NEGATE (d
);
507 if (REAL_VALUE_NEGATIVE (d
))
508 d
= REAL_VALUE_NEGATE (d
);
512 d
= real_value_truncate (mode
, d
);
516 /* All this does is change the mode. */
520 d
= REAL_VALUE_RNDZINT (d
);
524 d
= REAL_VALUE_UNSIGNED_RNDZINT (d
);
534 x
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
535 set_float_handler (NULL_PTR
);
539 else if (GET_CODE (op
) == CONST_DOUBLE
540 && GET_MODE_CLASS (GET_MODE (op
)) == MODE_FLOAT
541 && GET_MODE_CLASS (mode
) == MODE_INT
542 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
548 if (setjmp (handler
))
551 set_float_handler (handler
);
553 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
558 val
= REAL_VALUE_FIX (d
);
562 val
= REAL_VALUE_UNSIGNED_FIX (d
);
569 set_float_handler (NULL_PTR
);
571 val
= trunc_int_for_mode (val
, mode
);
573 return GEN_INT (val
);
576 /* This was formerly used only for non-IEEE float.
577 eggert@twinsun.com says it is safe for IEEE also. */
580 /* There are some simplifications we can do even if the operands
586 /* (not (not X)) == X, similarly for NEG. */
587 if (GET_CODE (op
) == code
)
592 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
593 becomes just the MINUS if its mode is MODE. This allows
594 folding switch statements on machines using casesi (such as
596 if (GET_CODE (op
) == TRUNCATE
597 && GET_MODE (XEXP (op
, 0)) == mode
598 && GET_CODE (XEXP (op
, 0)) == MINUS
599 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
600 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
603 #ifdef POINTERS_EXTEND_UNSIGNED
604 if (! POINTERS_EXTEND_UNSIGNED
605 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
607 return convert_memory_address (Pmode
, op
);
611 #ifdef POINTERS_EXTEND_UNSIGNED
613 if (POINTERS_EXTEND_UNSIGNED
614 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
616 return convert_memory_address (Pmode
, op
);
628 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
629 and OP1. Return 0 if no simplification is possible.
631 Don't use this for relational operations such as EQ or LT.
632 Use simplify_relational_operation instead. */
635 simplify_binary_operation (code
, mode
, op0
, op1
)
637 enum machine_mode mode
;
640 register HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
642 unsigned int width
= GET_MODE_BITSIZE (mode
);
645 /* Relational operations don't work here. We must know the mode
646 of the operands in order to do the comparison correctly.
647 Assuming a full word can give incorrect results.
648 Consider comparing 128 with -128 in QImode. */
650 if (GET_RTX_CLASS (code
) == '<')
653 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
654 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
655 && GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
656 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
658 REAL_VALUE_TYPE f0
, f1
, value
;
661 if (setjmp (handler
))
664 set_float_handler (handler
);
666 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
667 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
668 f0
= real_value_truncate (mode
, f0
);
669 f1
= real_value_truncate (mode
, f1
);
671 #ifdef REAL_ARITHMETIC
672 #ifndef REAL_INFINITY
673 if (code
== DIV
&& REAL_VALUES_EQUAL (f1
, dconst0
))
676 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
690 #ifndef REAL_INFINITY
697 value
= MIN (f0
, f1
);
700 value
= MAX (f0
, f1
);
707 value
= real_value_truncate (mode
, value
);
708 set_float_handler (NULL_PTR
);
709 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
711 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
713 /* We can fold some multi-word operations. */
714 if (GET_MODE_CLASS (mode
) == MODE_INT
715 && width
== HOST_BITS_PER_WIDE_INT
* 2
716 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
717 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
719 unsigned HOST_WIDE_INT l1
, l2
, lv
;
720 HOST_WIDE_INT h1
, h2
, hv
;
722 if (GET_CODE (op0
) == CONST_DOUBLE
)
723 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
725 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
727 if (GET_CODE (op1
) == CONST_DOUBLE
)
728 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
730 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
735 /* A - B == A + (-B). */
736 neg_double (l2
, h2
, &lv
, &hv
);
739 /* .. fall through ... */
742 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
746 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
749 case DIV
: case MOD
: case UDIV
: case UMOD
:
750 /* We'd need to include tree.h to do this and it doesn't seem worth
755 lv
= l1
& l2
, hv
= h1
& h2
;
759 lv
= l1
| l2
, hv
= h1
| h2
;
763 lv
= l1
^ l2
, hv
= h1
^ h2
;
769 && ((unsigned HOST_WIDE_INT
) l1
770 < (unsigned HOST_WIDE_INT
) l2
)))
779 && ((unsigned HOST_WIDE_INT
) l1
780 > (unsigned HOST_WIDE_INT
) l2
)))
787 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
789 && ((unsigned HOST_WIDE_INT
) l1
790 < (unsigned HOST_WIDE_INT
) l2
)))
797 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
799 && ((unsigned HOST_WIDE_INT
) l1
800 > (unsigned HOST_WIDE_INT
) l2
)))
806 case LSHIFTRT
: case ASHIFTRT
:
808 case ROTATE
: case ROTATERT
:
809 #ifdef SHIFT_COUNT_TRUNCATED
810 if (SHIFT_COUNT_TRUNCATED
)
811 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
814 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
817 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
818 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
820 else if (code
== ASHIFT
)
821 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
822 else if (code
== ROTATE
)
823 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
824 else /* code == ROTATERT */
825 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
832 return immed_double_const (lv
, hv
, mode
);
835 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
836 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
838 /* Even if we can't compute a constant result,
839 there are some cases worth simplifying. */
844 /* In IEEE floating point, x+0 is not the same as x. Similarly
845 for the other optimizations below. */
846 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
847 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
850 if (op1
== CONST0_RTX (mode
))
853 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
854 if (GET_CODE (op0
) == NEG
)
855 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
856 else if (GET_CODE (op1
) == NEG
)
857 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
859 /* Handle both-operands-constant cases. We can only add
860 CONST_INTs to constants since the sum of relocatable symbols
861 can't be handled by most assemblers. Don't add CONST_INT
862 to CONST_INT since overflow won't be computed properly if wider
863 than HOST_BITS_PER_WIDE_INT. */
865 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
866 && GET_CODE (op1
) == CONST_INT
)
867 return plus_constant (op0
, INTVAL (op1
));
868 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
869 && GET_CODE (op0
) == CONST_INT
)
870 return plus_constant (op1
, INTVAL (op0
));
872 /* See if this is something like X * C - X or vice versa or
873 if the multiplication is written as a shift. If so, we can
874 distribute and make a new multiply, shift, or maybe just
875 have X (if C is 2 in the example above). But don't make
876 real multiply if we didn't have one before. */
878 if (! FLOAT_MODE_P (mode
))
880 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
881 rtx lhs
= op0
, rhs
= op1
;
884 if (GET_CODE (lhs
) == NEG
)
885 coeff0
= -1, lhs
= XEXP (lhs
, 0);
886 else if (GET_CODE (lhs
) == MULT
887 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
889 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
892 else if (GET_CODE (lhs
) == ASHIFT
893 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
894 && INTVAL (XEXP (lhs
, 1)) >= 0
895 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
897 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
901 if (GET_CODE (rhs
) == NEG
)
902 coeff1
= -1, rhs
= XEXP (rhs
, 0);
903 else if (GET_CODE (rhs
) == MULT
904 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
906 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
909 else if (GET_CODE (rhs
) == ASHIFT
910 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
911 && INTVAL (XEXP (rhs
, 1)) >= 0
912 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
914 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
918 if (rtx_equal_p (lhs
, rhs
))
920 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
921 GEN_INT (coeff0
+ coeff1
));
922 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
926 /* If one of the operands is a PLUS or a MINUS, see if we can
927 simplify this by the associative law.
928 Don't use the associative law for floating point.
929 The inaccuracy makes it nonassociative,
930 and subtle programs can break if operations are associated. */
932 if (INTEGRAL_MODE_P (mode
)
933 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
934 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
935 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
941 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
942 using cc0, in which case we want to leave it as a COMPARE
943 so we can distinguish it from a register-register-copy.
945 In IEEE floating point, x-0 is not the same as x. */
947 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
948 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
949 && op1
== CONST0_RTX (mode
))
953 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
954 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
955 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
956 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
958 rtx xop00
= XEXP (op0
, 0);
959 rtx xop10
= XEXP (op1
, 0);
962 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
964 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
965 && GET_MODE (xop00
) == GET_MODE (xop10
)
966 && REGNO (xop00
) == REGNO (xop10
)
967 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
968 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
975 /* None of these optimizations can be done for IEEE
977 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
978 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
981 /* We can't assume x-x is 0 even with non-IEEE floating point,
982 but since it is zero except in very strange circumstances, we
983 will treat it as zero with -ffast-math. */
984 if (rtx_equal_p (op0
, op1
)
985 && ! side_effects_p (op0
)
986 && (! FLOAT_MODE_P (mode
) || flag_fast_math
))
987 return CONST0_RTX (mode
);
989 /* Change subtraction from zero into negation. */
990 if (op0
== CONST0_RTX (mode
))
991 return gen_rtx_NEG (mode
, op1
);
993 /* (-1 - a) is ~a. */
994 if (op0
== constm1_rtx
)
995 return gen_rtx_NOT (mode
, op1
);
997 /* Subtracting 0 has no effect. */
998 if (op1
== CONST0_RTX (mode
))
1001 /* See if this is something like X * C - X or vice versa or
1002 if the multiplication is written as a shift. If so, we can
1003 distribute and make a new multiply, shift, or maybe just
1004 have X (if C is 2 in the example above). But don't make
1005 real multiply if we didn't have one before. */
1007 if (! FLOAT_MODE_P (mode
))
1009 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1010 rtx lhs
= op0
, rhs
= op1
;
1013 if (GET_CODE (lhs
) == NEG
)
1014 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1015 else if (GET_CODE (lhs
) == MULT
1016 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1018 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1021 else if (GET_CODE (lhs
) == ASHIFT
1022 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1023 && INTVAL (XEXP (lhs
, 1)) >= 0
1024 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1026 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1027 lhs
= XEXP (lhs
, 0);
1030 if (GET_CODE (rhs
) == NEG
)
1031 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1032 else if (GET_CODE (rhs
) == MULT
1033 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1035 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1038 else if (GET_CODE (rhs
) == ASHIFT
1039 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1040 && INTVAL (XEXP (rhs
, 1)) >= 0
1041 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1043 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1044 rhs
= XEXP (rhs
, 0);
1047 if (rtx_equal_p (lhs
, rhs
))
1049 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1050 GEN_INT (coeff0
- coeff1
));
1051 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1055 /* (a - (-b)) -> (a + b). */
1056 if (GET_CODE (op1
) == NEG
)
1057 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1059 /* If one of the operands is a PLUS or a MINUS, see if we can
1060 simplify this by the associative law.
1061 Don't use the associative law for floating point.
1062 The inaccuracy makes it nonassociative,
1063 and subtle programs can break if operations are associated. */
1065 if (INTEGRAL_MODE_P (mode
)
1066 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1067 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
1068 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1071 /* Don't let a relocatable value get a negative coeff. */
1072 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1073 return plus_constant (op0
, - INTVAL (op1
));
1075 /* (x - (x & y)) -> (x & ~y) */
1076 if (GET_CODE (op1
) == AND
)
1078 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1079 return simplify_gen_binary (AND
, mode
, op0
,
1080 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
1081 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1082 return simplify_gen_binary (AND
, mode
, op0
,
1083 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
1088 if (op1
== constm1_rtx
)
1090 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1092 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1095 /* In IEEE floating point, x*0 is not always 0. */
1096 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1097 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1098 && op1
== CONST0_RTX (mode
)
1099 && ! side_effects_p (op0
))
1102 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1103 However, ANSI says we can drop signals,
1104 so we can do this anyway. */
1105 if (op1
== CONST1_RTX (mode
))
1108 /* Convert multiply by constant power of two into shift unless
1109 we are still generating RTL. This test is a kludge. */
1110 if (GET_CODE (op1
) == CONST_INT
1111 && (val
= exact_log2 (INTVAL (op1
))) >= 0
1112 /* If the mode is larger than the host word size, and the
1113 uppermost bit is set, then this isn't a power of two due
1114 to implicit sign extension. */
1115 && (width
<= HOST_BITS_PER_WIDE_INT
1116 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1117 && ! rtx_equal_function_value_matters
)
1118 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1120 if (GET_CODE (op1
) == CONST_DOUBLE
1121 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
)
1125 int op1is2
, op1ism1
;
1127 if (setjmp (handler
))
1130 set_float_handler (handler
);
1131 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1132 op1is2
= REAL_VALUES_EQUAL (d
, dconst2
);
1133 op1ism1
= REAL_VALUES_EQUAL (d
, dconstm1
);
1134 set_float_handler (NULL_PTR
);
1136 /* x*2 is x+x and x*(-1) is -x */
1137 if (op1is2
&& GET_MODE (op0
) == mode
)
1138 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1140 else if (op1ism1
&& GET_MODE (op0
) == mode
)
1141 return gen_rtx_NEG (mode
, op0
);
1146 if (op1
== const0_rtx
)
1148 if (GET_CODE (op1
) == CONST_INT
1149 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1151 if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1153 /* A | (~A) -> -1 */
1154 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1155 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1156 && ! side_effects_p (op0
)
1157 && GET_MODE_CLASS (mode
) != MODE_CC
)
1162 if (op1
== const0_rtx
)
1164 if (GET_CODE (op1
) == CONST_INT
1165 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1166 return gen_rtx_NOT (mode
, op0
);
1167 if (op0
== op1
&& ! side_effects_p (op0
)
1168 && GET_MODE_CLASS (mode
) != MODE_CC
)
1173 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1175 if (GET_CODE (op1
) == CONST_INT
1176 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1178 if (op0
== op1
&& ! side_effects_p (op0
)
1179 && GET_MODE_CLASS (mode
) != MODE_CC
)
1182 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1183 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1184 && ! side_effects_p (op0
)
1185 && GET_MODE_CLASS (mode
) != MODE_CC
)
1190 /* Convert divide by power of two into shift (divide by 1 handled
1192 if (GET_CODE (op1
) == CONST_INT
1193 && (arg1
= exact_log2 (INTVAL (op1
))) > 0)
1194 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1196 /* ... fall through ... */
1199 if (op1
== CONST1_RTX (mode
))
1202 /* In IEEE floating point, 0/x is not always 0. */
1203 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1204 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1205 && op0
== CONST0_RTX (mode
)
1206 && ! side_effects_p (op1
))
1209 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1210 /* Change division by a constant into multiplication. Only do
1211 this with -ffast-math until an expert says it is safe in
1213 else if (GET_CODE (op1
) == CONST_DOUBLE
1214 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
1215 && op1
!= CONST0_RTX (mode
)
1219 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1221 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1223 #if defined (REAL_ARITHMETIC)
1224 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1225 return gen_rtx_MULT (mode
, op0
,
1226 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1229 gen_rtx_MULT (mode
, op0
,
1230 CONST_DOUBLE_FROM_REAL_VALUE (1./d
, mode
));
1238 /* Handle modulus by power of two (mod with 1 handled below). */
1239 if (GET_CODE (op1
) == CONST_INT
1240 && exact_log2 (INTVAL (op1
)) > 0)
1241 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1243 /* ... fall through ... */
1246 if ((op0
== const0_rtx
|| op1
== const1_rtx
)
1247 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1253 /* Rotating ~0 always results in ~0. */
1254 if (GET_CODE (op0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1255 && (unsigned HOST_WIDE_INT
) INTVAL (op0
) == GET_MODE_MASK (mode
)
1256 && ! side_effects_p (op1
))
1259 /* ... fall through ... */
1264 if (op1
== const0_rtx
)
1266 if (op0
== const0_rtx
&& ! side_effects_p (op1
))
1271 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1272 && INTVAL (op1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1273 && ! side_effects_p (op0
))
1275 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1280 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1281 && ((unsigned HOST_WIDE_INT
) INTVAL (op1
)
1282 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1283 && ! side_effects_p (op0
))
1285 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1290 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1292 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1297 if (op1
== constm1_rtx
&& ! side_effects_p (op0
))
1299 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1310 /* Get the integer argument values in two forms:
1311 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1313 arg0
= INTVAL (op0
);
1314 arg1
= INTVAL (op1
);
1316 if (width
< HOST_BITS_PER_WIDE_INT
)
1318 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1319 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1322 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1323 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1326 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1327 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1335 /* Compute the value of the arithmetic. */
1340 val
= arg0s
+ arg1s
;
1344 val
= arg0s
- arg1s
;
1348 val
= arg0s
* arg1s
;
1354 val
= arg0s
/ arg1s
;
1360 val
= arg0s
% arg1s
;
1366 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1372 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1388 /* If shift count is undefined, don't fold it; let the machine do
1389 what it wants. But truncate it if the machine will do that. */
1393 #ifdef SHIFT_COUNT_TRUNCATED
1394 if (SHIFT_COUNT_TRUNCATED
)
1398 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1405 #ifdef SHIFT_COUNT_TRUNCATED
1406 if (SHIFT_COUNT_TRUNCATED
)
1410 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1417 #ifdef SHIFT_COUNT_TRUNCATED
1418 if (SHIFT_COUNT_TRUNCATED
)
1422 val
= arg0s
>> arg1
;
1424 /* Bootstrap compiler may not have sign extended the right shift.
1425 Manually extend the sign to ensure bootstrap cc matches gcc. */
1426 if (arg0s
< 0 && arg1
> 0)
1427 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1436 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1437 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1445 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1446 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1450 /* Do nothing here. */
1454 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1458 val
= ((unsigned HOST_WIDE_INT
) arg0
1459 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1463 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1467 val
= ((unsigned HOST_WIDE_INT
) arg0
1468 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1475 val
= trunc_int_for_mode (val
, mode
);
1477 return GEN_INT (val
);
1480 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1483 Rather than test for specific case, we do this by a brute-force method
1484 and do all possible simplifications until no more changes occur. Then
1485 we rebuild the operation. */
1488 simplify_plus_minus (code
, mode
, op0
, op1
)
1490 enum machine_mode mode
;
1496 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
= 0;
1497 int first
= 1, negate
= 0, changed
;
1500 bzero ((char *) ops
, sizeof ops
);
1502 /* Set up the two operands and then expand them until nothing has been
1503 changed. If we run out of room in our array, give up; this should
1504 almost never happen. */
1506 ops
[0] = op0
, ops
[1] = op1
, negs
[0] = 0, negs
[1] = (code
== MINUS
);
1513 for (i
= 0; i
< n_ops
; i
++)
1514 switch (GET_CODE (ops
[i
]))
1521 ops
[n_ops
] = XEXP (ops
[i
], 1);
1522 negs
[n_ops
++] = GET_CODE (ops
[i
]) == MINUS
? !negs
[i
] : negs
[i
];
1523 ops
[i
] = XEXP (ops
[i
], 0);
1529 ops
[i
] = XEXP (ops
[i
], 0);
1530 negs
[i
] = ! negs
[i
];
1535 ops
[i
] = XEXP (ops
[i
], 0);
1541 /* ~a -> (-a - 1) */
1544 ops
[n_ops
] = constm1_rtx
;
1545 negs
[n_ops
++] = negs
[i
];
1546 ops
[i
] = XEXP (ops
[i
], 0);
1547 negs
[i
] = ! negs
[i
];
1554 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0, changed
= 1;
1562 /* If we only have two operands, we can't do anything. */
1566 /* Now simplify each pair of operands until nothing changes. The first
1567 time through just simplify constants against each other. */
1574 for (i
= 0; i
< n_ops
- 1; i
++)
1575 for (j
= i
+ 1; j
< n_ops
; j
++)
1576 if (ops
[i
] != 0 && ops
[j
] != 0
1577 && (! first
|| (CONSTANT_P (ops
[i
]) && CONSTANT_P (ops
[j
]))))
1579 rtx lhs
= ops
[i
], rhs
= ops
[j
];
1580 enum rtx_code ncode
= PLUS
;
1582 if (negs
[i
] && ! negs
[j
])
1583 lhs
= ops
[j
], rhs
= ops
[i
], ncode
= MINUS
;
1584 else if (! negs
[i
] && negs
[j
])
1587 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
1590 ops
[i
] = tem
, ops
[j
] = 0;
1591 negs
[i
] = negs
[i
] && negs
[j
];
1592 if (GET_CODE (tem
) == NEG
)
1593 ops
[i
] = XEXP (tem
, 0), negs
[i
] = ! negs
[i
];
1595 if (GET_CODE (ops
[i
]) == CONST_INT
&& negs
[i
])
1596 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0;
1604 /* Pack all the operands to the lower-numbered entries and give up if
1605 we didn't reduce the number of operands we had. Make sure we
1606 count a CONST as two operands. If we have the same number of
1607 operands, but have made more CONSTs than we had, this is also
1608 an improvement, so accept it. */
1610 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1613 ops
[i
] = ops
[j
], negs
[i
++] = negs
[j
];
1614 if (GET_CODE (ops
[j
]) == CONST
)
1618 if (i
+ n_consts
> input_ops
1619 || (i
+ n_consts
== input_ops
&& n_consts
<= input_consts
))
1624 /* If we have a CONST_INT, put it last. */
1625 for (i
= 0; i
< n_ops
- 1; i
++)
1626 if (GET_CODE (ops
[i
]) == CONST_INT
)
1628 tem
= ops
[n_ops
- 1], ops
[n_ops
- 1] = ops
[i
] , ops
[i
] = tem
;
1629 j
= negs
[n_ops
- 1], negs
[n_ops
- 1] = negs
[i
], negs
[i
] = j
;
1632 /* Put a non-negated operand first. If there aren't any, make all
1633 operands positive and negate the whole thing later. */
1634 for (i
= 0; i
< n_ops
&& negs
[i
]; i
++)
1639 for (i
= 0; i
< n_ops
; i
++)
1645 tem
= ops
[0], ops
[0] = ops
[i
], ops
[i
] = tem
;
1646 j
= negs
[0], negs
[0] = negs
[i
], negs
[i
] = j
;
1649 /* Now make the result by performing the requested operations. */
1651 for (i
= 1; i
< n_ops
; i
++)
1652 result
= simplify_gen_binary (negs
[i
] ? MINUS
: PLUS
, mode
, result
, ops
[i
]);
1654 return negate
? gen_rtx_NEG (mode
, result
) : result
;
1659 rtx op0
, op1
; /* Input */
1660 int equal
, op0lt
, op1lt
; /* Output */
1664 check_fold_consts (data
)
1667 struct cfc_args
*args
= (struct cfc_args
*) data
;
1668 REAL_VALUE_TYPE d0
, d1
;
1670 REAL_VALUE_FROM_CONST_DOUBLE (d0
, args
->op0
);
1671 REAL_VALUE_FROM_CONST_DOUBLE (d1
, args
->op1
);
1672 args
->equal
= REAL_VALUES_EQUAL (d0
, d1
);
1673 args
->op0lt
= REAL_VALUES_LESS (d0
, d1
);
1674 args
->op1lt
= REAL_VALUES_LESS (d1
, d0
);
1677 /* Like simplify_binary_operation except used for relational operators.
1678 MODE is the mode of the operands, not that of the result. If MODE
1679 is VOIDmode, both operands must also be VOIDmode and we compare the
1680 operands in "infinite precision".
1682 If no simplification is possible, this function returns zero. Otherwise,
1683 it returns either const_true_rtx or const0_rtx. */
1686 simplify_relational_operation (code
, mode
, op0
, op1
)
1688 enum machine_mode mode
;
1691 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
1694 if (mode
== VOIDmode
1695 && (GET_MODE (op0
) != VOIDmode
1696 || GET_MODE (op1
) != VOIDmode
))
1699 /* If op0 is a compare, extract the comparison arguments from it. */
1700 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
1701 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
1703 /* We can't simplify MODE_CC values since we don't know what the
1704 actual comparison is. */
1705 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
1712 /* Make sure the constant is second. */
1713 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
1714 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
1716 tem
= op0
, op0
= op1
, op1
= tem
;
1717 code
= swap_condition (code
);
1720 /* For integer comparisons of A and B maybe we can simplify A - B and can
1721 then simplify a comparison of that with zero. If A and B are both either
1722 a register or a CONST_INT, this can't help; testing for these cases will
1723 prevent infinite recursion here and speed things up.
1725 If CODE is an unsigned comparison, then we can never do this optimization,
1726 because it gives an incorrect result if the subtraction wraps around zero.
1727 ANSI C defines unsigned operations such that they never overflow, and
1728 thus such cases can not be ignored. */
1730 if (INTEGRAL_MODE_P (mode
) && op1
!= const0_rtx
1731 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (op0
) == CONST_INT
)
1732 && (GET_CODE (op1
) == REG
|| GET_CODE (op1
) == CONST_INT
))
1733 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
1734 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
1735 return simplify_relational_operation (signed_condition (code
),
1736 mode
, tem
, const0_rtx
);
1738 /* For non-IEEE floating-point, if the two operands are equal, we know the
1740 if (rtx_equal_p (op0
, op1
)
1741 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1742 || ! FLOAT_MODE_P (GET_MODE (op0
)) || flag_fast_math
))
1743 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
1745 /* If the operands are floating-point constants, see if we can fold
1747 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1748 else if (GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
1749 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_FLOAT
)
1751 struct cfc_args args
;
1753 /* Set up input for check_fold_consts(). */
1757 if (do_float_handler(check_fold_consts
, (PTR
) &args
) == 0)
1758 /* We got an exception from check_fold_consts() */
1761 /* Receive output from check_fold_consts() */
1763 op0lt
= op0ltu
= args
.op0lt
;
1764 op1lt
= op1ltu
= args
.op1lt
;
1766 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1768 /* Otherwise, see if the operands are both integers. */
1769 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
1770 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
1771 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
1773 int width
= GET_MODE_BITSIZE (mode
);
1774 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
1775 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
1777 /* Get the two words comprising each integer constant. */
1778 if (GET_CODE (op0
) == CONST_DOUBLE
)
1780 l0u
= l0s
= CONST_DOUBLE_LOW (op0
);
1781 h0u
= h0s
= CONST_DOUBLE_HIGH (op0
);
1785 l0u
= l0s
= INTVAL (op0
);
1786 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
1789 if (GET_CODE (op1
) == CONST_DOUBLE
)
1791 l1u
= l1s
= CONST_DOUBLE_LOW (op1
);
1792 h1u
= h1s
= CONST_DOUBLE_HIGH (op1
);
1796 l1u
= l1s
= INTVAL (op1
);
1797 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
1800 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1801 we have to sign or zero-extend the values. */
1802 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
1803 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
1805 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
1807 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1808 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1810 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1811 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1813 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1814 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1817 equal
= (h0u
== h1u
&& l0u
== l1u
);
1818 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
1819 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
1820 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
1821 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
1824 /* Otherwise, there are some code-specific tests we can make. */
1830 /* References to the frame plus a constant or labels cannot
1831 be zero, but a SYMBOL_REF can due to #pragma weak. */
1832 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1833 || GET_CODE (op0
) == LABEL_REF
)
1834 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1835 /* On some machines, the ap reg can be 0 sometimes. */
1836 && op0
!= arg_pointer_rtx
1843 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1844 || GET_CODE (op0
) == LABEL_REF
)
1845 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1846 && op0
!= arg_pointer_rtx
1849 return const_true_rtx
;
1853 /* Unsigned values are never negative. */
1854 if (op1
== const0_rtx
)
1855 return const_true_rtx
;
1859 if (op1
== const0_rtx
)
1864 /* Unsigned values are never greater than the largest
1866 if (GET_CODE (op1
) == CONST_INT
1867 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1868 && INTEGRAL_MODE_P (mode
))
1869 return const_true_rtx
;
1873 if (GET_CODE (op1
) == CONST_INT
1874 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1875 && INTEGRAL_MODE_P (mode
))
1886 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1891 return equal
? const_true_rtx
: const0_rtx
;
1893 return ! equal
? const_true_rtx
: const0_rtx
;
1895 return op0lt
? const_true_rtx
: const0_rtx
;
1897 return op1lt
? const_true_rtx
: const0_rtx
;
1899 return op0ltu
? const_true_rtx
: const0_rtx
;
1901 return op1ltu
? const_true_rtx
: const0_rtx
;
1903 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
1905 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
1907 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
1909 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
1915 /* Simplify CODE, an operation with result mode MODE and three operands,
1916 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1917 a constant. Return 0 if no simplification is possible. */
1920 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
1922 enum machine_mode mode
, op0_mode
;
1925 unsigned int width
= GET_MODE_BITSIZE (mode
);
1927 /* VOIDmode means "infinite" precision. */
1929 width
= HOST_BITS_PER_WIDE_INT
;
1935 if (GET_CODE (op0
) == CONST_INT
1936 && GET_CODE (op1
) == CONST_INT
1937 && GET_CODE (op2
) == CONST_INT
1938 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
1939 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
1941 /* Extracting a bit-field from a constant */
1942 HOST_WIDE_INT val
= INTVAL (op0
);
1944 if (BITS_BIG_ENDIAN
)
1945 val
>>= (GET_MODE_BITSIZE (op0_mode
)
1946 - INTVAL (op2
) - INTVAL (op1
));
1948 val
>>= INTVAL (op2
);
1950 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
1952 /* First zero-extend. */
1953 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
1954 /* If desired, propagate sign bit. */
1955 if (code
== SIGN_EXTRACT
1956 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
1957 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
1960 /* Clear the bits that don't belong in our mode,
1961 unless they and our sign bit are all one.
1962 So we get either a reasonable negative value or a reasonable
1963 unsigned value for this mode. */
1964 if (width
< HOST_BITS_PER_WIDE_INT
1965 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
1966 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
1967 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1969 return GEN_INT (val
);
1974 if (GET_CODE (op0
) == CONST_INT
)
1975 return op0
!= const0_rtx
? op1
: op2
;
1977 /* Convert a == b ? b : a to "a". */
1978 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
1979 && rtx_equal_p (XEXP (op0
, 0), op1
)
1980 && rtx_equal_p (XEXP (op0
, 1), op2
))
1982 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
1983 && rtx_equal_p (XEXP (op0
, 1), op1
)
1984 && rtx_equal_p (XEXP (op0
, 0), op2
))
1986 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
1988 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
1989 ? GET_MODE (XEXP (op0
, 1))
1990 : GET_MODE (XEXP (op0
, 0)));
1992 = simplify_relational_operation (GET_CODE (op0
), cmp_mode
,
1993 XEXP (op0
, 0), XEXP (op0
, 1));
1995 /* See if any simplifications were possible. */
1996 if (temp
== const0_rtx
)
1998 else if (temp
== const1_rtx
)
2003 /* Look for happy constants in op1 and op2. */
2004 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
2006 HOST_WIDE_INT t
= INTVAL (op1
);
2007 HOST_WIDE_INT f
= INTVAL (op2
);
2009 if (t
== STORE_FLAG_VALUE
&& f
== 0)
2010 code
= GET_CODE (op0
);
2011 else if (t
== 0 && f
== STORE_FLAG_VALUE
2012 && can_reverse_comparison_p (op0
, NULL_RTX
))
2013 code
= reverse_condition (GET_CODE (op0
));
2017 return gen_rtx_fmt_ee (code
, mode
, XEXP (op0
, 0), XEXP (op0
, 1));
2029 /* Simplify X, an rtx expression.
2031 Return the simplified expression or NULL if no simplifications
2034 This is the preferred entry point into the simplification routines;
2035 however, we still allow passes to call the more specific routines.
2037 Right now GCC has three (yes, three) major bodies of RTL simplification
2038 code that need to be unified.
2040 1. fold_rtx in cse.c. This code uses various CSE specific
2041 information to aid in RTL simplification.
2043 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2044 it uses combine specific information to aid in RTL
2047 3. The routines in this file.
2050 Long term we want to only have one body of simplification code; to
2051 get to that state I recommend the following steps:
2053 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2054 which are not pass dependent state into these routines.
2056 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2057 use this routine whenever possible.
2059 3. Allow for pass dependent state to be provided to these
2060 routines and add simplifications based on the pass dependent
2061 state. Remove code from cse.c & combine.c that becomes
2064 It will take time, but ultimately the compiler will be easier to
2065 maintain and improve. It's totally silly that when we add a
2066 simplification that it needs to be added to 4 places (3 for RTL
2067 simplification and 1 for tree simplification). */
2074 enum machine_mode mode
;
2076 mode
= GET_MODE (x
);
2077 code
= GET_CODE (x
);
2079 switch (GET_RTX_CLASS (code
))
2082 return simplify_unary_operation (code
, mode
,
2083 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
2086 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
2090 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
2091 XEXP (x
, 0), XEXP (x
, 1), XEXP (x
, 2));
2094 return simplify_relational_operation (code
,
2095 (GET_MODE (XEXP (x
, 0)) != VOIDmode
2096 ? GET_MODE (XEXP (x
, 0))
2097 : GET_MODE (XEXP (x
, 1))),
2098 XEXP (x
, 0), XEXP (x
, 1));
2105 /* Allocate a struct elt_list and fill in its two elements with the
2108 static struct elt_list
*
2109 new_elt_list (next
, elt
)
2110 struct elt_list
*next
;
2113 struct elt_list
*el
= empty_elt_lists
;
2116 empty_elt_lists
= el
->next
;
2118 el
= (struct elt_list
*) obstack_alloc (&cselib_obstack
,
2119 sizeof (struct elt_list
));
2125 /* Allocate a struct elt_loc_list and fill in its two elements with the
2128 static struct elt_loc_list
*
2129 new_elt_loc_list (next
, loc
)
2130 struct elt_loc_list
*next
;
2133 struct elt_loc_list
*el
= empty_elt_loc_lists
;
2136 empty_elt_loc_lists
= el
->next
;
2138 el
= (struct elt_loc_list
*) obstack_alloc (&cselib_obstack
,
2139 sizeof (struct elt_loc_list
));
2142 el
->setting_insn
= cselib_current_insn
;
2146 /* The elt_list at *PL is no longer needed. Unchain it and free its
2150 unchain_one_elt_list (pl
)
2151 struct elt_list
**pl
;
2153 struct elt_list
*l
= *pl
;
2156 l
->next
= empty_elt_lists
;
2157 empty_elt_lists
= l
;
2160 /* Likewise for elt_loc_lists. */
2163 unchain_one_elt_loc_list (pl
)
2164 struct elt_loc_list
**pl
;
2166 struct elt_loc_list
*l
= *pl
;
2169 l
->next
= empty_elt_loc_lists
;
2170 empty_elt_loc_lists
= l
;
2173 /* Likewise for cselib_vals. This also frees the addr_list associated with
2177 unchain_one_value (v
)
2180 while (v
->addr_list
)
2181 unchain_one_elt_list (&v
->addr_list
);
2183 v
->u
.next_free
= empty_vals
;
2187 /* Remove all entries from the hash table. Also used during
2195 for (i
= 0; i
< cselib_nregs
; i
++)
2198 htab_empty (hash_table
);
2199 obstack_free (&cselib_obstack
, cselib_startobj
);
2202 empty_elt_lists
= 0;
2203 empty_elt_loc_lists
= 0;
2204 n_useless_values
= 0;
2206 next_unknown_value
= 0;
2209 /* The equality test for our hash table. The first argument ENTRY is a table
2210 element (i.e. a cselib_val), while the second arg X is an rtx. */
2213 entry_and_rtx_equal_p (entry
, x_arg
)
2214 const void *entry
, *x_arg
;
2216 struct elt_loc_list
*l
;
2217 const cselib_val
*v
= (const cselib_val
*) entry
;
2218 rtx x
= (rtx
) x_arg
;
2220 /* We don't guarantee that distinct rtx's have different hash values,
2221 so we need to do a comparison. */
2222 for (l
= v
->locs
; l
; l
= l
->next
)
2223 if (rtx_equal_for_cselib_p (l
->loc
, x
))
2229 /* The hash function for our hash table. The value is always computed with
2230 hash_rtx when adding an element; this function just extracts the hash
2231 value from a cselib_val structure. */
2234 get_value_hash (entry
)
2237 const cselib_val
*v
= (const cselib_val
*) entry
;
2241 /* Return true if X contains a VALUE rtx. If ONLY_USELESS is set, we
2242 only return true for values which point to a cselib_val whose value
2243 element has been set to zero, which implies the cselib_val will be
2247 references_value_p (x
, only_useless
)
2251 enum rtx_code code
= GET_CODE (x
);
2252 const char *fmt
= GET_RTX_FORMAT (code
);
2255 if (GET_CODE (x
) == VALUE
2256 && (! only_useless
|| CSELIB_VAL_PTR (x
)->locs
== 0))
2259 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2261 if (fmt
[i
] == 'e' && references_value_p (XEXP (x
, i
), only_useless
))
2263 else if (fmt
[i
] == 'E')
2264 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2265 if (references_value_p (XVECEXP (x
, i
, j
), only_useless
))
2272 /* For all locations found in X, delete locations that reference useless
2273 values (i.e. values without any location). Called through
2277 discard_useless_locs (x
, info
)
2279 void *info ATTRIBUTE_UNUSED
;
2281 cselib_val
*v
= (cselib_val
*)*x
;
2282 struct elt_loc_list
**p
= &v
->locs
;
2283 int had_locs
= v
->locs
!= 0;
2287 if (references_value_p ((*p
)->loc
, 1))
2288 unchain_one_elt_loc_list (p
);
2293 if (had_locs
&& v
->locs
== 0)
2296 values_became_useless
= 1;
2301 /* If X is a value with no locations, remove it from the hashtable. */
2304 discard_useless_values (x
, info
)
2306 void *info ATTRIBUTE_UNUSED
;
2308 cselib_val
*v
= (cselib_val
*)*x
;
2312 htab_clear_slot (hash_table
, x
);
2313 unchain_one_value (v
);
2320 /* Clean out useless values (i.e. those which no longer have locations
2321 associated with them) from the hash table. */
2324 remove_useless_values ()
2326 /* First pass: eliminate locations that reference the value. That in
2327 turn can make more values useless. */
2330 values_became_useless
= 0;
2331 htab_traverse (hash_table
, discard_useless_locs
, 0);
2333 while (values_became_useless
);
2335 /* Second pass: actually remove the values. */
2336 htab_traverse (hash_table
, discard_useless_values
, 0);
2338 if (n_useless_values
!= 0)
2342 /* Return nonzero if we can prove that X and Y contain the same value, taking
2343 our gathered information into account. */
2346 rtx_equal_for_cselib_p (x
, y
)
2353 if (GET_CODE (x
) == REG
|| GET_CODE (x
) == MEM
)
2355 cselib_val
*e
= cselib_lookup (x
, VOIDmode
, 0);
2361 if (GET_CODE (y
) == REG
|| GET_CODE (y
) == MEM
)
2363 cselib_val
*e
= cselib_lookup (y
, VOIDmode
, 0);
2372 if (GET_CODE (x
) == VALUE
&& GET_CODE (y
) == VALUE
)
2373 return CSELIB_VAL_PTR (x
) == CSELIB_VAL_PTR (y
);
2375 if (GET_CODE (x
) == VALUE
)
2377 cselib_val
*e
= CSELIB_VAL_PTR (x
);
2378 struct elt_loc_list
*l
;
2380 for (l
= e
->locs
; l
; l
= l
->next
)
2384 /* Avoid infinite recursion. */
2385 if (GET_CODE (t
) == REG
|| GET_CODE (t
) == MEM
)
2387 else if (rtx_equal_for_cselib_p (t
, y
))
2394 if (GET_CODE (y
) == VALUE
)
2396 cselib_val
*e
= CSELIB_VAL_PTR (y
);
2397 struct elt_loc_list
*l
;
2399 for (l
= e
->locs
; l
; l
= l
->next
)
2403 if (GET_CODE (t
) == REG
|| GET_CODE (t
) == MEM
)
2405 else if (rtx_equal_for_cselib_p (x
, t
))
2412 if (GET_CODE (x
) != GET_CODE (y
) || GET_MODE (x
) != GET_MODE (y
))
2415 /* This won't be handled correctly by the code below. */
2416 if (GET_CODE (x
) == LABEL_REF
)
2417 return XEXP (x
, 0) == XEXP (y
, 0);
2419 code
= GET_CODE (x
);
2420 fmt
= GET_RTX_FORMAT (code
);
2422 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2429 if (XWINT (x
, i
) != XWINT (y
, i
))
2435 if (XINT (x
, i
) != XINT (y
, i
))
2441 /* Two vectors must have the same length. */
2442 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
2445 /* And the corresponding elements must match. */
2446 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2447 if (! rtx_equal_for_cselib_p (XVECEXP (x
, i
, j
),
2453 if (! rtx_equal_for_cselib_p (XEXP (x
, i
), XEXP (y
, i
)))
2459 if (strcmp (XSTR (x
, i
), XSTR (y
, i
)))
2464 /* These are just backpointers, so they don't matter. */
2471 /* It is believed that rtx's at this level will never
2472 contain anything but integers and other rtx's,
2473 except for within LABEL_REFs and SYMBOL_REFs. */
2481 /* Hash an rtx. Return 0 if we couldn't hash the rtx.
2482 For registers and memory locations, we look up their cselib_val structure
2483 and return its VALUE element.
2484 Possible reasons for returning 0 are: the object is volatile, or we couldn't
2485 find a register or memory location in the table and CREATE is zero. If
2486 CREATE is nonzero, table elts are created for regs and mem.
2487 MODE is used in hashing for CONST_INTs only;
2488 otherwise the mode of X is used. */
2491 hash_rtx (x
, mode
, create
)
2493 enum machine_mode mode
;
2500 unsigned int hash
= 0;
2502 /* repeat is used to turn tail-recursion into iteration. */
2504 code
= GET_CODE (x
);
2505 hash
+= (unsigned) code
+ (unsigned) GET_MODE (x
);
2511 e
= cselib_lookup (x
, GET_MODE (x
), create
);
2519 hash
+= ((unsigned) CONST_INT
<< 7) + (unsigned) mode
+ INTVAL (x
);
2520 return hash
? hash
: CONST_INT
;
2523 /* This is like the general case, except that it only counts
2524 the integers representing the constant. */
2525 hash
+= (unsigned) code
+ (unsigned) GET_MODE (x
);
2526 if (GET_MODE (x
) != VOIDmode
)
2527 for (i
= 2; i
< GET_RTX_LENGTH (CONST_DOUBLE
); i
++)
2528 hash
+= XWINT (x
, i
);
2530 hash
+= ((unsigned) CONST_DOUBLE_LOW (x
)
2531 + (unsigned) CONST_DOUBLE_HIGH (x
));
2532 return hash
? hash
: CONST_DOUBLE
;
2534 /* Assume there is only one rtx object for any given label. */
2537 += ((unsigned) LABEL_REF
<< 7) + (unsigned long) XEXP (x
, 0);
2538 return hash
? hash
: LABEL_REF
;
2542 += ((unsigned) SYMBOL_REF
<< 7) + (unsigned long) XSTR (x
, 0);
2543 return hash
? hash
: SYMBOL_REF
;
2554 case UNSPEC_VOLATILE
:
2558 if (MEM_VOLATILE_P (x
))
2567 i
= GET_RTX_LENGTH (code
) - 1;
2568 fmt
= GET_RTX_FORMAT (code
);
2573 rtx tem
= XEXP (x
, i
);
2574 unsigned int tem_hash
;
2576 /* If we are about to do the last recursive call
2577 needed at this level, change it into iteration.
2578 This function is called enough to be worth it. */
2585 tem_hash
= hash_rtx (tem
, 0, create
);
2591 else if (fmt
[i
] == 'E')
2592 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2594 unsigned int tem_hash
= hash_rtx (XVECEXP (x
, i
, j
), 0, create
);
2601 else if (fmt
[i
] == 's')
2603 const unsigned char *p
= (const unsigned char *) XSTR (x
, i
);
2609 else if (fmt
[i
] == 'i')
2610 hash
+= XINT (x
, i
);
2611 else if (fmt
[i
] == '0' || fmt
[i
] == 't')
2617 return hash
? hash
: 1 + GET_CODE (x
);
2620 /* Create a new value structure for VALUE and initialize it. The mode of the
2624 new_cselib_val (value
, mode
)
2626 enum machine_mode mode
;
2628 cselib_val
*e
= empty_vals
;
2631 empty_vals
= e
->u
.next_free
;
2633 e
= (cselib_val
*) obstack_alloc (&cselib_obstack
, sizeof (cselib_val
));
2639 e
->u
.val_rtx
= gen_rtx_VALUE (mode
);
2640 CSELIB_VAL_PTR (e
->u
.val_rtx
) = e
;
2646 /* ADDR_ELT is a value that is used as address. MEM_ELT is the value that
2647 contains the data at this address. X is a MEM that represents the
2648 value. Update the two value structures to represent this situation. */
2651 add_mem_for_addr (addr_elt
, mem_elt
, x
)
2652 cselib_val
*addr_elt
, *mem_elt
;
2656 struct elt_loc_list
*l
;
2658 /* Avoid duplicates. */
2659 for (l
= mem_elt
->locs
; l
; l
= l
->next
)
2660 if (GET_CODE (l
->loc
) == MEM
2661 && CSELIB_VAL_PTR (XEXP (l
->loc
, 0)) == addr_elt
)
2664 new = gen_rtx_MEM (GET_MODE (x
), addr_elt
->u
.val_rtx
);
2665 MEM_COPY_ATTRIBUTES (new, x
);
2667 addr_elt
->addr_list
= new_elt_list (addr_elt
->addr_list
, mem_elt
);
2668 mem_elt
->locs
= new_elt_loc_list (mem_elt
->locs
, new);
2671 /* Subroutine of cselib_lookup. Return a value for X, which is a MEM rtx.
2672 If CREATE, make a new one if we haven't seen it before. */
2675 cselib_lookup_mem (x
, create
)
2681 cselib_val
*mem_elt
;
2684 if (MEM_VOLATILE_P (x
) || GET_MODE (x
) == BLKmode
2685 || (FLOAT_MODE_P (GET_MODE (x
)) && flag_float_store
))
2688 /* Look up the value for the address. */
2689 addr
= cselib_lookup (XEXP (x
, 0), GET_MODE (x
), create
);
2693 /* Find a value that describes a value of our mode at that address. */
2694 for (l
= addr
->addr_list
; l
; l
= l
->next
)
2695 if (GET_MODE (l
->elt
->u
.val_rtx
) == GET_MODE (x
))
2701 mem_elt
= new_cselib_val (++next_unknown_value
, GET_MODE (x
));
2702 add_mem_for_addr (addr
, mem_elt
, x
);
2703 slot
= htab_find_slot_with_hash (hash_table
, x
, mem_elt
->value
, INSERT
);
2708 /* Walk rtx X and replace all occurrences of REG and MEM subexpressions
2709 with VALUE expressions. This way, it becomes independent of changes
2710 to registers and memory.
2711 X isn't actually modified; if modifications are needed, new rtl is
2712 allocated. However, the return value can share rtl with X. */
2715 cselib_subst_to_values (x
)
2718 enum rtx_code code
= GET_CODE (x
);
2719 const char *fmt
= GET_RTX_FORMAT (code
);
2728 for (l
= REG_VALUES (REGNO (x
)); l
; l
= l
->next
)
2729 if (GET_MODE (l
->elt
->u
.val_rtx
) == GET_MODE (x
))
2730 return l
->elt
->u
.val_rtx
;
2735 e
= cselib_lookup_mem (x
, 0);
2738 return e
->u
.val_rtx
;
2740 /* CONST_DOUBLEs must be special-cased here so that we won't try to
2741 look up the CONST_DOUBLE_MEM inside. */
2750 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2754 rtx t
= cselib_subst_to_values (XEXP (x
, i
));
2756 if (t
!= XEXP (x
, i
) && x
== copy
)
2757 copy
= shallow_copy_rtx (x
);
2761 else if (fmt
[i
] == 'E')
2765 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2767 rtx t
= cselib_subst_to_values (XVECEXP (x
, i
, j
));
2769 if (t
!= XVECEXP (x
, i
, j
) && XVEC (x
, i
) == XVEC (copy
, i
))
2772 copy
= shallow_copy_rtx (x
);
2774 XVEC (copy
, i
) = rtvec_alloc (XVECLEN (x
, i
));
2775 for (k
= 0; k
< j
; k
++)
2776 XVECEXP (copy
, i
, k
) = XVECEXP (x
, i
, k
);
2779 XVECEXP (copy
, i
, j
) = t
;
2787 /* Look up the rtl expression X in our tables and return the value it has.
2788 If CREATE is zero, we return NULL if we don't know the value. Otherwise,
2789 we create a new one if possible, using mode MODE if X doesn't have a mode
2790 (i.e. because it's a constant). */
2793 cselib_lookup (x
, mode
, create
)
2795 enum machine_mode mode
;
2800 unsigned int hashval
;
2802 if (GET_MODE (x
) != VOIDmode
)
2803 mode
= GET_MODE (x
);
2805 if (GET_CODE (x
) == VALUE
)
2806 return CSELIB_VAL_PTR (x
);
2808 if (GET_CODE (x
) == REG
)
2811 unsigned int i
= REGNO (x
);
2813 for (l
= REG_VALUES (i
); l
; l
= l
->next
)
2814 if (mode
== GET_MODE (l
->elt
->u
.val_rtx
))
2820 e
= new_cselib_val (++next_unknown_value
, GET_MODE (x
));
2821 e
->locs
= new_elt_loc_list (e
->locs
, x
);
2822 REG_VALUES (i
) = new_elt_list (REG_VALUES (i
), e
);
2823 slot
= htab_find_slot_with_hash (hash_table
, x
, e
->value
, INSERT
);
2828 if (GET_CODE (x
) == MEM
)
2829 return cselib_lookup_mem (x
, create
);
2831 hashval
= hash_rtx (x
, mode
, create
);
2832 /* Can't even create if hashing is not possible. */
2836 slot
= htab_find_slot_with_hash (hash_table
, x
, hashval
,
2837 create
? INSERT
: NO_INSERT
);
2841 e
= (cselib_val
*) *slot
;
2845 e
= new_cselib_val (hashval
, mode
);
2847 /* We have to fill the slot before calling cselib_subst_to_values:
2848 the hash table is inconsistent until we do so, and
2849 cselib_subst_to_values will need to do lookups. */
2851 e
->locs
= new_elt_loc_list (e
->locs
, cselib_subst_to_values (x
));
2855 /* Invalidate any entries in reg_values that overlap REGNO. This is called
2856 if REGNO is changing. MODE is the mode of the assignment to REGNO, which
2857 is used to determine how many hard registers are being changed. If MODE
2858 is VOIDmode, then only REGNO is being changed; this is used when
2859 invalidating call clobbered registers across a call. */
2862 cselib_invalidate_regno (regno
, mode
)
2864 enum machine_mode mode
;
2866 unsigned int endregno
;
2869 /* If we see pseudos after reload, something is _wrong_. */
2870 if (reload_completed
&& regno
>= FIRST_PSEUDO_REGISTER
2871 && reg_renumber
[regno
] >= 0)
2874 /* Determine the range of registers that must be invalidated. For
2875 pseudos, only REGNO is affected. For hard regs, we must take MODE
2876 into account, and we must also invalidate lower register numbers
2877 if they contain values that overlap REGNO. */
2878 endregno
= regno
+ 1;
2879 if (regno
< FIRST_PSEUDO_REGISTER
&& mode
!= VOIDmode
)
2880 endregno
= regno
+ HARD_REGNO_NREGS (regno
, mode
);
2882 for (i
= 0; i
< endregno
; i
++)
2884 struct elt_list
**l
= ®_VALUES (i
);
2886 /* Go through all known values for this reg; if it overlaps the range
2887 we're invalidating, remove the value. */
2890 cselib_val
*v
= (*l
)->elt
;
2891 struct elt_loc_list
**p
;
2892 unsigned int this_last
= i
;
2894 if (i
< FIRST_PSEUDO_REGISTER
)
2895 this_last
+= HARD_REGNO_NREGS (i
, GET_MODE (v
->u
.val_rtx
)) - 1;
2897 if (this_last
< regno
)
2903 /* We have an overlap. */
2904 unchain_one_elt_list (l
);
2906 /* Now, we clear the mapping from value to reg. It must exist, so
2907 this code will crash intentionally if it doesn't. */
2908 for (p
= &v
->locs
; ; p
= &(*p
)->next
)
2912 if (GET_CODE (x
) == REG
&& REGNO (x
) == i
)
2914 unchain_one_elt_loc_list (p
);
2924 /* The memory at address MEM_BASE is being changed.
2925 Return whether this change will invalidate VAL. */
2928 cselib_mem_conflict_p (mem_base
, val
)
2936 code
= GET_CODE (val
);
2939 /* Get rid of a few simple cases quickly. */
2952 if (GET_MODE (mem_base
) == BLKmode
2953 || GET_MODE (val
) == BLKmode
2954 || anti_dependence (val
, mem_base
))
2957 /* The address may contain nested MEMs. */
2964 fmt
= GET_RTX_FORMAT (code
);
2965 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2969 if (cselib_mem_conflict_p (mem_base
, XEXP (val
, i
)))
2972 else if (fmt
[i
] == 'E')
2973 for (j
= 0; j
< XVECLEN (val
, i
); j
++)
2974 if (cselib_mem_conflict_p (mem_base
, XVECEXP (val
, i
, j
)))
2981 /* For the value found in SLOT, walk its locations to determine if any overlap
2982 INFO (which is a MEM rtx). */
2985 cselib_invalidate_mem_1 (slot
, info
)
2989 cselib_val
*v
= (cselib_val
*) *slot
;
2990 rtx mem_rtx
= (rtx
) info
;
2991 struct elt_loc_list
**p
= &v
->locs
;
2992 int had_locs
= v
->locs
!= 0;
2998 struct elt_list
**mem_chain
;
3000 /* MEMs may occur in locations only at the top level; below
3001 that every MEM or REG is substituted by its VALUE. */
3002 if (GET_CODE (x
) != MEM
3003 || ! cselib_mem_conflict_p (mem_rtx
, x
))
3009 /* This one overlaps. */
3010 /* We must have a mapping from this MEM's address to the
3011 value (E). Remove that, too. */
3012 addr
= cselib_lookup (XEXP (x
, 0), VOIDmode
, 0);
3013 mem_chain
= &addr
->addr_list
;
3016 if ((*mem_chain
)->elt
== v
)
3018 unchain_one_elt_list (mem_chain
);
3022 mem_chain
= &(*mem_chain
)->next
;
3025 unchain_one_elt_loc_list (p
);
3028 if (had_locs
&& v
->locs
== 0)
3034 /* Invalidate any locations in the table which are changed because of a
3035 store to MEM_RTX. If this is called because of a non-const call
3036 instruction, MEM_RTX is (mem:BLK const0_rtx). */
3039 cselib_invalidate_mem (mem_rtx
)
3042 htab_traverse (hash_table
, cselib_invalidate_mem_1
, mem_rtx
);
3045 /* Invalidate DEST, which is being assigned to or clobbered. The second and
3046 the third parameter exist so that this function can be passed to
3047 note_stores; they are ignored. */
3050 cselib_invalidate_rtx (dest
, ignore
, data
)
3052 rtx ignore ATTRIBUTE_UNUSED
;
3053 void *data ATTRIBUTE_UNUSED
;
3055 while (GET_CODE (dest
) == STRICT_LOW_PART
|| GET_CODE (dest
) == SIGN_EXTRACT
3056 || GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SUBREG
)
3057 dest
= XEXP (dest
, 0);
3059 if (GET_CODE (dest
) == REG
)
3060 cselib_invalidate_regno (REGNO (dest
), GET_MODE (dest
));
3061 else if (GET_CODE (dest
) == MEM
)
3062 cselib_invalidate_mem (dest
);
3064 /* Some machines don't define AUTO_INC_DEC, but they still use push
3065 instructions. We need to catch that case here in order to
3066 invalidate the stack pointer correctly. Note that invalidating
3067 the stack pointer is different from invalidating DEST. */
3068 if (push_operand (dest
, GET_MODE (dest
)))
3069 cselib_invalidate_rtx (stack_pointer_rtx
, NULL_RTX
, NULL
);
3072 /* Record the result of a SET instruction. DEST is being set; the source
3073 contains the value described by SRC_ELT. If DEST is a MEM, DEST_ADDR_ELT
3074 describes its address. */
3077 cselib_record_set (dest
, src_elt
, dest_addr_elt
)
3079 cselib_val
*src_elt
, *dest_addr_elt
;
3081 int dreg
= GET_CODE (dest
) == REG
? (int) REGNO (dest
) : -1;
3083 if (src_elt
== 0 || side_effects_p (dest
))
3088 REG_VALUES (dreg
) = new_elt_list (REG_VALUES (dreg
), src_elt
);
3089 if (src_elt
->locs
== 0)
3091 src_elt
->locs
= new_elt_loc_list (src_elt
->locs
, dest
);
3093 else if (GET_CODE (dest
) == MEM
&& dest_addr_elt
!= 0)
3095 if (src_elt
->locs
== 0)
3097 add_mem_for_addr (dest_addr_elt
, src_elt
, dest
);
3101 /* Describe a single set that is part of an insn. */
3106 cselib_val
*src_elt
;
3107 cselib_val
*dest_addr_elt
;
3110 /* There is no good way to determine how many elements there can be
3111 in a PARALLEL. Since it's fairly cheap, use a really large number. */
3112 #define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
3114 /* Record the effects of any sets in INSN. */
3116 cselib_record_sets (insn
)
3121 struct set sets
[MAX_SETS
];
3122 rtx body
= PATTERN (insn
);
3124 body
= PATTERN (insn
);
3125 /* Find all sets. */
3126 if (GET_CODE (body
) == SET
)
3128 sets
[0].src
= SET_SRC (body
);
3129 sets
[0].dest
= SET_DEST (body
);
3132 else if (GET_CODE (body
) == PARALLEL
)
3134 /* Look through the PARALLEL and record the values being
3135 set, if possible. Also handle any CLOBBERs. */
3136 for (i
= XVECLEN (body
, 0) - 1; i
>= 0; --i
)
3138 rtx x
= XVECEXP (body
, 0, i
);
3140 if (GET_CODE (x
) == SET
)
3142 sets
[n_sets
].src
= SET_SRC (x
);
3143 sets
[n_sets
].dest
= SET_DEST (x
);
3149 /* Look up the values that are read. Do this before invalidating the
3150 locations that are written. */
3151 for (i
= 0; i
< n_sets
; i
++)
3153 sets
[i
].src_elt
= cselib_lookup (sets
[i
].src
, GET_MODE (sets
[i
].dest
),
3155 if (GET_CODE (sets
[i
].dest
) == MEM
)
3156 sets
[i
].dest_addr_elt
= cselib_lookup (XEXP (sets
[i
].dest
, 0), Pmode
,
3159 sets
[i
].dest_addr_elt
= 0;
3162 /* Invalidate all locations written by this insn. Note that the elts we
3163 looked up in the previous loop aren't affected, just some of their
3164 locations may go away. */
3165 note_stores (body
, cselib_invalidate_rtx
, NULL
);
3167 /* Now enter the equivalences in our tables. */
3168 for (i
= 0; i
< n_sets
; i
++)
3169 cselib_record_set (sets
[i
].dest
, sets
[i
].src_elt
, sets
[i
].dest_addr_elt
);
3172 /* Record the effects of INSN. */
3175 cselib_process_insn (insn
)
3181 cselib_current_insn
= insn
;
3183 /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp. */
3184 if (GET_CODE (insn
) == CODE_LABEL
3185 || (GET_CODE (insn
) == NOTE
3186 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_SETJMP
)
3187 || (GET_CODE (insn
) == INSN
3188 && GET_CODE (PATTERN (insn
)) == ASM_OPERANDS
3189 && MEM_VOLATILE_P (PATTERN (insn
))))
3195 if (! INSN_P (insn
))
3197 cselib_current_insn
= 0;
3201 /* If this is a call instruction, forget anything stored in a
3202 call clobbered register, or, if this is not a const call, in
3204 if (GET_CODE (insn
) == CALL_INSN
)
3206 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
3207 if (call_used_regs
[i
])
3208 cselib_invalidate_regno (i
, VOIDmode
);
3210 if (! CONST_CALL_P (insn
))
3211 cselib_invalidate_mem (callmem
);
3214 cselib_record_sets (insn
);
3217 /* Clobber any registers which appear in REG_INC notes. We
3218 could keep track of the changes to their values, but it is
3219 unlikely to help. */
3220 for (x
= REG_NOTES (insn
); x
; x
= XEXP (x
, 1))
3221 if (REG_NOTE_KIND (x
) == REG_INC
)
3222 cselib_invalidate_rtx (XEXP (x
, 0), NULL_RTX
, NULL
);
3225 /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
3226 after we have processed the insn. */
3227 if (GET_CODE (insn
) == CALL_INSN
)
3228 for (x
= CALL_INSN_FUNCTION_USAGE (insn
); x
; x
= XEXP (x
, 1))
3229 if (GET_CODE (XEXP (x
, 0)) == CLOBBER
)
3230 cselib_invalidate_rtx (XEXP (XEXP (x
, 0), 0), NULL_RTX
, NULL
);
3232 cselib_current_insn
= 0;
3234 if (n_useless_values
> MAX_USELESS_VALUES
)
3235 remove_useless_values ();
3238 /* Make sure our varrays are big enough. Not called from any cselib routines;
3239 it must be called by the user if it allocated new registers. */
3242 cselib_update_varray_sizes ()
3244 unsigned int nregs
= max_reg_num ();
3246 if (nregs
== cselib_nregs
)
3249 cselib_nregs
= nregs
;
3250 VARRAY_GROW (reg_values
, nregs
);
3253 /* Initialize cselib for one pass. The caller must also call
3254 init_alias_analysis. */
3259 /* These are only created once. */
3262 extern struct obstack permanent_obstack
;
3264 gcc_obstack_init (&cselib_obstack
);
3265 cselib_startobj
= obstack_alloc (&cselib_obstack
, 0);
3267 push_obstacks (&permanent_obstack
, &permanent_obstack
);
3268 callmem
= gen_rtx_MEM (BLKmode
, const0_rtx
);
3270 ggc_add_rtx_root (&callmem
, 1);
3273 cselib_nregs
= max_reg_num ();
3274 VARRAY_ELT_LIST_INIT (reg_values
, cselib_nregs
, "reg_values");
3275 hash_table
= htab_create (31, get_value_hash
, entry_and_rtx_equal_p
, NULL
);
3279 /* Called when the current user is done with cselib. */
3285 htab_delete (hash_table
);