/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
   a header file so that their definitions can be shared with the
   simplification routines in simplify-rtx.c.  Until then, do not
   change these macros without also changing the copy in simplify-rtx.c.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || ((X) == arg_pointer_rtx				\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Similar, but also allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P, however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || ((X) == arg_pointer_rtx				\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
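
/* Illustrative use (a sketch, not part of the original source; the
   variable names here are hypothetical):  */
#if 0
  unsigned HOST_WIDE_INT low = (unsigned HOST_WIDE_INT) INTVAL (op);
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);
  /* HIGH is now -1 if the sign bit of LOW is set and 0 otherwise, so
     the pair (LOW, HIGH) represents the same value at double width.  */
#endif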
static rtx simplify_plus_minus		PARAMS ((enum rtx_code,
						 enum machine_mode, rtx, rtx));
static void check_fold_consts		PARAMS ((PTR));
static int entry_and_rtx_equal_p	PARAMS ((const void *, const void *));
static unsigned int get_value_hash	PARAMS ((const void *));
static struct elt_list *new_elt_list	PARAMS ((struct elt_list *,
						 cselib_val *));
static struct elt_loc_list *new_elt_loc_list PARAMS ((struct elt_loc_list *,
						      rtx));
static void unchain_one_value		PARAMS ((cselib_val *));
static void unchain_one_elt_list	PARAMS ((struct elt_list **));
static void unchain_one_elt_loc_list	PARAMS ((struct elt_loc_list **));
static void clear_table			PARAMS ((void));
static int discard_useless_locs		PARAMS ((void **, void *));
static int discard_useless_values	PARAMS ((void **, void *));
static void remove_useless_values	PARAMS ((void));
static unsigned int hash_rtx		PARAMS ((rtx, enum machine_mode, int));
static cselib_val *new_cselib_val	PARAMS ((unsigned int,
						 enum machine_mode));
static void add_mem_for_addr		PARAMS ((cselib_val *, cselib_val *,
						 rtx));
static cselib_val *cselib_lookup_mem	PARAMS ((rtx, int));
static rtx cselib_subst_to_values	PARAMS ((rtx));
static void cselib_invalidate_regno	PARAMS ((unsigned int,
						 enum machine_mode));
static int cselib_mem_conflict_p	PARAMS ((rtx, rtx));
static int cselib_invalidate_mem_1	PARAMS ((void **, void *));
static void cselib_invalidate_mem	PARAMS ((rtx));
static void cselib_invalidate_rtx	PARAMS ((rtx, rtx, void *));
static void cselib_record_set		PARAMS ((rtx, cselib_val *,
						 cselib_val *));
static void cselib_record_sets		PARAMS ((rtx));
/* There are three ways in which cselib can look up an rtx:
   - for a REG, the reg_values table (which is indexed by regno) is used
   - for a MEM, we recursively look up its address and then follow the
     addr_list of that value
   - for everything else, we compute a hash value and go through the hash
     table.  Since different rtx's can still have the same hash value,
     this involves walking the table entries for a given value and comparing
     the locations of the entries with the rtx we are looking up.  */
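
/* Compressed sketch of that dispatch (illustration only -- the real
   logic, including creation of new table entries, lives in
   cselib_lookup, cselib_lookup_mem and hash_rtx below):  */
#if 0
  if (GET_CODE (x) == REG)
    l = REG_VALUES (REGNO (x));		/* then scan L for a matching mode */
  else if (GET_CODE (x) == MEM)
    e = cselib_lookup_mem (x, create);	/* recurses on the address */
  else
    slot = htab_find_slot_with_hash (hash_table, x,
				     hash_rtx (x, mode, create), INSERT);
#endif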
/* A table that enables us to look up elts by their value.  */
static htab_t hash_table;

/* This is a global so we don't have to pass this through every function.
   It is used in new_elt_loc_list to set SETTING_INSN.  */
static rtx cselib_current_insn;

/* Every new unknown value gets a unique number.  */
static unsigned int next_unknown_value;

/* The number of registers we had when the varrays were last resized.  */
static unsigned int cselib_nregs;

/* Count values without known locations.  Whenever this grows too big, we
   remove these useless values from the table.  */
static int n_useless_values;

/* Number of useless values before we remove them from the hash table.  */
#define MAX_USELESS_VALUES 32

/* This table maps from register number to values.  It does not contain
   pointers to cselib_val structures, but rather elt_lists.  The purpose is
   to be able to refer to the same register in different modes.  */
static varray_type reg_values;
#define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))

/* We pass this to cselib_invalidate_mem to invalidate all of
   memory for a non-const call instruction.  */
static rtx callmem;

/* Memory for our structures is allocated from this obstack.  */
static struct obstack cselib_obstack;

/* Used to quickly free all memory.  */
static char *cselib_startobj;

/* Caches for unused structures.  */
static cselib_val *empty_vals;
static struct elt_list *empty_elt_lists;
static struct elt_loc_list *empty_elt_loc_lists;

/* Set by discard_useless_locs if it deleted the last location of any
   value.  */
static int values_became_useless;
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && ((CONSTANT_P (op0) && GET_CODE (op1) != CONST_INT)
	  || (GET_RTX_CLASS (GET_CODE (op0)) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')
	  || (GET_CODE (op0) == SUBREG
	      && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0))) == 'o'
	      && GET_RTX_CLASS (GET_CODE (op1)) != 'o')))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);

  if (tem)
    return tem;

  /* Handle addition and subtraction of CONST_INT specially.  Otherwise,
     just form the operation.  */

  if (code == PLUS && GET_CODE (op1) == CONST_INT
      && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, INTVAL (op1));
  else if (code == MINUS && GET_CODE (op1) == CONST_INT
	   && GET_MODE (op0) != VOIDmode)
    return plus_constant (op0, - INTVAL (op1));
  else
    return gen_rtx_fmt_ee (code, mode, op0, op1);
}
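
/* Usage sketch (hypothetical caller, not from the original source):
   the constant operand of a commutative PLUS is moved second and,
   where possible, folded via plus_constant.  */
#if 0
  rtx sum = simplify_gen_binary (PLUS, SImode, GEN_INT (4), some_reg);
  /* yields (plus:SI some_reg (const_int 4)) */
#endif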
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

#if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_INT (d, lv, hv, mode);
#else
      if (hv < 0)
	{
	  d = (double) (~ hv);
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) (~ lv);
	  d = (- d - 1.0);
	}
      else
	{
	  d = (double) hv;
	  d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
		* (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
	  d += (double) (unsigned HOST_WIDE_INT) lv;
	}
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

#ifdef REAL_ARITHMETIC
      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
#else
      d = (double) (unsigned HOST_WIDE_INT) hv;
      d *= ((double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2))
	    * (double) ((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)));
      d += (double) (unsigned HOST_WIDE_INT) lv;
#endif  /* REAL_ARITHMETIC */
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
#endif

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      register HOST_WIDE_INT arg0 = INTVAL (op);
      register HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;

	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;

	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode && width <= HOST_BITS_PER_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      rtx x;

      if (setjmp (handler))
	/* There used to be a warning here, but that is inadvisable.
	   People may want to cause traps, and the natural way
	   to do it should not get a warning.  */
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;

	case ABS:
	  if (REAL_VALUE_NEGATIVE (d))
	    d = REAL_VALUE_NEGATE (d);
	  break;

	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;

	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;

	case FIX:
	  d = REAL_VALUE_RNDZINT (d);
	  break;

	case UNSIGNED_FIX:
	  d = REAL_VALUE_UNSIGNED_RNDZINT (d);
	  break;

	case SQRT:
	  return 0;

	default:
	  abort ();
	}

      x = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
      set_float_handler (NULL_PTR);
      return x;
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      REAL_VALUE_TYPE d;
      jmp_buf handler;
      HOST_WIDE_INT val;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case FIX:
	  val = REAL_VALUE_FIX (d);
	  break;

	case UNSIGNED_FIX:
	  val = REAL_VALUE_UNSIGNED_FIX (d);
	  break;

	default:
	  abort ();
	}

      set_float_handler (NULL_PTR);

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
#endif
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	case NEG:
	  /* (not (not X)) == X, similarly for NEG.  */
	  if (GET_CODE (op) == code)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the Vax).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#ifdef POINTERS_EXTEND_UNSIGNED
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && CONSTANT_P (op))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#ifdef POINTERS_EXTEND_UNSIGNED
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && CONSTANT_P (op))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  register HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;
      jmp_buf handler;

      if (setjmp (handler))
	return 0;

      set_float_handler (handler);

      REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

#ifdef REAL_ARITHMETIC
#ifndef REAL_INFINITY
      if (code == DIV && REAL_VALUES_EQUAL (f1, dconst0))
	return 0;
#endif
      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
#else
      switch (code)
	{
	case PLUS:
	  value = f0 + f1;
	  break;
	case MINUS:
	  value = f0 - f1;
	  break;
	case MULT:
	  value = f0 * f1;
	  break;
	case DIV:
#ifndef REAL_INFINITY
	  if (f1 == 0)
	    return 0;
#endif
	  value = f0 / f1;
	  break;
	case SMIN:
	  value = MIN (f0, f1);
	  break;
	case SMAX:
	  value = MAX (f0, f1);
	  break;
	default:
	  abort ();
	}
#endif

      value = real_value_truncate (mode, value);
      set_float_handler (NULL_PTR);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* .. fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* In IEEE floating point, x+0 is not the same as x.  Similarly
	     for the other optimizations below.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_fast_math)
	    break;

	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_fast_math)
	      && op1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }

	  break;

	case MINUS:
	  /* None of these optimizations can be done for IEEE
	     floating point.  */
	  if (TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
	      && FLOAT_MODE_P (mode) && ! flag_fast_math)
	    break;

	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -ffast-math.  */
	  if (rtx_equal_p (op0, op1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_fast_math))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  */
	  if (op0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (op0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect.  */
	  if (op1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS)
	      && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return plus_constant (op0, - INTVAL (op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;

	case MULT:
	  if (op1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* In IEEE floating point, x*0 is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_fast_math)
	      && op1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for nans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (val = exact_log2 (INTVAL (op1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  if (GET_CODE (op1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE d;
	      jmp_buf handler;
	      int op1is2, op1ism1;

	      if (setjmp (handler))
		return 0;

	      set_float_handler (handler);
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);
	      op1is2 = REAL_VALUES_EQUAL (d, dconst2);
	      op1ism1 = REAL_VALUES_EQUAL (d, dconstm1);
	      set_float_handler (NULL_PTR);

	      /* x*2 is x+x and x*(-1) is -x */
	      if (op1is2 && GET_MODE (op0) == mode)
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      else if (op1ism1 && GET_MODE (op0) == mode)
		return gen_rtx_NEG (mode, op0);
	    }
	  break;

	case IOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op1;
	  if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (op1 == const0_rtx)
	    return op0;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return gen_rtx_NOT (mode, op0);
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (op1) == CONST_INT
	      && (INTVAL (op1) & GET_MODE_MASK (mode)) == GET_MODE_MASK (mode))
	    return op0;
	  if (op0 == op1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (op1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (op1 == CONST1_RTX (mode))
	    return op0;

	  /* In IEEE floating point, 0/x is not always 0.  */
	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_fast_math)
	      && op0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	  /* Change division by a constant into multiplication.  Only do
	     this with -ffast-math until an expert says it is safe in
	     general.  */
	  else if (GET_CODE (op1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (op1)) == MODE_FLOAT
		   && op1 != CONST0_RTX (mode)
		   && flag_fast_math)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, op1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
#if defined (REAL_ARITHMETIC)
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
#else
		  return
		    gen_rtx_MULT (mode, op0,
				  CONST_DOUBLE_FROM_REAL_VALUE (1./d, mode));
#endif
		}
	    }
#endif
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (op1) == CONST_INT
	      && exact_log2 (INTVAL (op1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((op0 == const0_rtx || op1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (op0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (op1 == const0_rtx)
	    return op0;
	  if (op0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && INTVAL (op1) == (HOST_WIDE_INT) 1 << (width -1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (op1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (op1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (op1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (op1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (op0, op1) && ! side_effects_p (op0))
	    return op0;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (op0);
  arg1 = INTVAL (op1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0)
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0)
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0)
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0)
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to insure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
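
/* Worked example (illustration only, not from the original source):
   for (MINUS (PLUS a b) (MINUS a c)) the expansion loop below flattens
   the operands into { a, b, -a, c }; the pairwise simplification pass
   then cancels a against -a, and the result is rebuilt as (PLUS b c).  */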
static rtx
simplify_plus_minus (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx ops[8];
  int negs[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts = 0;
  int first = 1, negate = 0, changed;
  int i, j;

  bzero ((char *) ops, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0] = op0, ops[1] = op1, negs[0] = 0, negs[1] = (code == MINUS);

  changed = 1;
  while (changed)
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	switch (GET_CODE (ops[i]))
	  {
	  case PLUS:
	  case MINUS:
	    if (n_ops == 7)
	      return 0;

	    ops[n_ops] = XEXP (ops[i], 1);
	    negs[n_ops++] = GET_CODE (ops[i]) == MINUS ? !negs[i] : negs[i];
	    ops[i] = XEXP (ops[i], 0);
	    input_ops++;
	    changed = 1;
	    break;

	  case NEG:
	    ops[i] = XEXP (ops[i], 0);
	    negs[i] = ! negs[i];
	    changed = 1;
	    break;

	  case CONST:
	    ops[i] = XEXP (ops[i], 0);
	    input_consts++;
	    changed = 1;
	    break;

	  case NOT:
	    /* ~a -> (-a - 1) */
	    if (n_ops != 7)
	      {
		ops[n_ops] = constm1_rtx;
		negs[n_ops++] = negs[i];
		ops[i] = XEXP (ops[i], 0);
		negs[i] = ! negs[i];
		changed = 1;
	      }
	    break;

	  case CONST_INT:
	    if (negs[i])
	      ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0, changed = 1;
	    break;

	  default:
	    break;
	  }
    }

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2)
    return 0;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  changed = 1;
  while (changed)
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  if (ops[i] != 0 && ops[j] != 0
	      && (! first || (CONSTANT_P (ops[i]) && CONSTANT_P (ops[j]))))
	    {
	      rtx lhs = ops[i], rhs = ops[j];
	      enum rtx_code ncode = PLUS;

	      if (negs[i] && ! negs[j])
		lhs = ops[j], rhs = ops[i], ncode = MINUS;
	      else if (! negs[i] && negs[j])
		ncode = MINUS;

	      tem = simplify_binary_operation (ncode, mode, lhs, rhs);
	      if (tem)
		{
		  ops[i] = tem, ops[j] = 0;
		  negs[i] = negs[i] && negs[j];
		  if (GET_CODE (tem) == NEG)
		    ops[i] = XEXP (tem, 0), negs[i] = ! negs[i];

		  if (GET_CODE (ops[i]) == CONST_INT && negs[i])
		    ops[i] = GEN_INT (- INTVAL (ops[i])), negs[i] = 0;

		  changed = 1;
		}
	    }

      first = 0;
    }

  /* Pack all the operands to the lower-numbered entries and give up if
     we didn't reduce the number of operands we had.  Make sure we
     count a CONST as two operands.  If we have the same number of
     operands, but have made more CONSTs than we had, this is also
     an improvement, so accept it.  */

  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j] != 0)
      {
	ops[i] = ops[j], negs[i++] = negs[j];
	if (GET_CODE (ops[j]) == CONST)
	  n_consts++;
      }

  if (i + n_consts > input_ops
      || (i + n_consts == input_ops && n_consts <= input_consts))
    return 0;

  n_ops = i;

  /* If we have a CONST_INT, put it last.  */
  for (i = 0; i < n_ops - 1; i++)
    if (GET_CODE (ops[i]) == CONST_INT)
      {
	tem = ops[n_ops - 1], ops[n_ops - 1] = ops[i] , ops[i] = tem;
	j = negs[n_ops - 1], negs[n_ops - 1] = negs[i], negs[i] = j;
      }

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */
  for (i = 0; i < n_ops && negs[i]; i++)
    ;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	negs[i] = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0], ops[0] = ops[i], ops[i] = tem;
      j = negs[0], negs[0] = negs[i], negs[i] = j;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0];
  for (i = 1; i < n_ops; i++)
    result = simplify_gen_binary (negs[i] ? MINUS : PLUS, mode, result, ops[i]);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
struct cfc_args
{
  rtx op0, op1;			/* Input */
  int equal, op0lt, op1lt;	/* Output */
};

static void
check_fold_consts (data)
     PTR data;
{
  struct cfc_args *args = (struct cfc_args *) data;
  REAL_VALUE_TYPE d0, d1;

  REAL_VALUE_FROM_CONST_DOUBLE (d0, args->op0);
  REAL_VALUE_FROM_CONST_DOUBLE (d1, args->op1);
  args->equal = REAL_VALUES_EQUAL (d0, d1);
  args->op0lt = REAL_VALUES_LESS (d0, d1);
  args->op1lt = REAL_VALUES_LESS (d1, d0);
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
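
/* Usage sketch (hypothetical operands, not from the original source):
   comparing two integer constants in "infinite precision".  */
#if 0
  rtx t = simplify_relational_operation (LT, VOIDmode,
					 GEN_INT (-3), GEN_INT (5));
  /* T is const_true_rtx, since -3 < 5.  */
#endif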
rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if ((CONSTANT_P (op0) && ! CONSTANT_P (op1))
      || (GET_CODE (op0) == CONST_INT && GET_CODE (op1) != CONST_INT))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored.  */

  if (INTEGRAL_MODE_P (mode) && op1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (op1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  /* For non-IEEE floating-point, if the two operands are equal, we know the
     result.  */
  if (rtx_equal_p (op0, op1)
      && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	  || ! FLOAT_MODE_P (GET_MODE (op0)) || flag_fast_math))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  else if (GET_CODE (op0) == CONST_DOUBLE && GET_CODE (op1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
    {
      struct cfc_args args;

      /* Setup input for check_fold_consts() */
      args.op0 = op0;
      args.op1 = op1;

      if (do_float_handler(check_fold_consts, (PTR) &args) == 0)
	/* We got an exception from check_fold_consts() */
	return 0;

      /* Receive output from check_fold_consts() */
      equal = args.equal;
      op0lt = op0ltu = args.op0lt;
      op1lt = op1ltu = args.op1lt;
    }
#endif  /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
	   && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (op0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (op0);
	  h0u = h0s = CONST_DOUBLE_HIGH (op0);
	}
      else
	{
	  l0u = l0s = INTVAL (op0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (op1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (op1);
	  h1u = h1s = CONST_DOUBLE_HIGH (op1);
	}
      else
	{
	  l1u = l1s = INTVAL (op1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && op1 == const0_rtx)
	       || GET_CODE (op0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (op1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (op1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (op1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (op1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    default:
      abort ();
    }
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
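
/* Usage sketch (hypothetical operands, not from the original source):
   a constant IF_THEN_ELSE condition selects one arm outright.  */
#if 0
  rtx x = simplify_ternary_operation (IF_THEN_ELSE, SImode, VOIDmode,
				      const1_rtx, op_true, op_false);
  /* X is OP_TRUE, per the IF_THEN_ELSE case below.  */
#endif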
rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	       && rtx_equal_p (XEXP (op0, 1), op1)
	       && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  rtx temp
	    = simplify_relational_operation (GET_CODE (op0), op0_mode,
					     XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    abort ();

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE
		       && can_reverse_comparison_p (op0, NULL_RTX))
		code = reverse_condition (GET_CODE (op0));
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification that it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */
rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code;
  enum machine_mode mode;

  mode = GET_MODE (x);
  code = GET_CODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case '2':
    case 'c':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1), XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code, GET_MODE (XEXP (x, 0)),
					    XEXP (x, 0), XEXP (x, 1));

    default:
      return NULL;
    }
}
/* Allocate a struct elt_list and fill in its two elements with the
   arguments.  */

static struct elt_list *
new_elt_list (next, elt)
     struct elt_list *next;
     cselib_val *elt;
{
  struct elt_list *el = empty_elt_lists;

  if (el)
    empty_elt_lists = el->next;
  else
    el = (struct elt_list *) obstack_alloc (&cselib_obstack,
					    sizeof (struct elt_list));
  el->next = next;
  el->elt = elt;
  return el;
}
/* Allocate a struct elt_loc_list and fill in its two elements with the
   arguments.  */

static struct elt_loc_list *
new_elt_loc_list (next, loc)
     struct elt_loc_list *next;
     rtx loc;
{
  struct elt_loc_list *el = empty_elt_loc_lists;

  if (el)
    empty_elt_loc_lists = el->next;
  else
    el = (struct elt_loc_list *) obstack_alloc (&cselib_obstack,
						sizeof (struct elt_loc_list));
  el->next = next;
  el->loc = loc;
  el->setting_insn = cselib_current_insn;
  return el;
}
/* The elt_list at *PL is no longer needed.  Unchain it and free its
   storage.  */

static void
unchain_one_elt_list (pl)
     struct elt_list **pl;
{
  struct elt_list *l = *pl;

  *pl = l->next;
  l->next = empty_elt_lists;
  empty_elt_lists = l;
}
/* Likewise for elt_loc_lists.  */

static void
unchain_one_elt_loc_list (pl)
     struct elt_loc_list **pl;
{
  struct elt_loc_list *l = *pl;

  *pl = l->next;
  l->next = empty_elt_loc_lists;
  empty_elt_loc_lists = l;
}
/* Likewise for cselib_vals.  This also frees the addr_list associated with
   V.  */

static void
unchain_one_value (v)
     cselib_val *v;
{
  while (v->addr_list)
    unchain_one_elt_list (&v->addr_list);

  v->u.next_free = empty_vals;
  empty_vals = v;
}
/* Remove all entries from the hash table.  Also used during
   initialization.  */

static void
clear_table ()
{
  unsigned int i;

  for (i = 0; i < cselib_nregs; i++)
    REG_VALUES (i) = 0;

  htab_empty (hash_table);
  obstack_free (&cselib_obstack, cselib_startobj);

  empty_vals = 0;
  empty_elt_lists = 0;
  empty_elt_loc_lists = 0;
  n_useless_values = 0;

  next_unknown_value = 0;
}
/* The equality test for our hash table.  The first argument ENTRY is a table
   element (i.e. a cselib_val), while the second arg X is an rtx.  */

static int
entry_and_rtx_equal_p (entry, x_arg)
     const void *entry, *x_arg;
{
  struct elt_loc_list *l;
  const cselib_val *v = (const cselib_val *) entry;
  rtx x = (rtx) x_arg;

  /* We don't guarantee that distinct rtx's have different hash values,
     so we need to do a comparison.  */
  for (l = v->locs; l; l = l->next)
    if (rtx_equal_for_cselib_p (l->loc, x))
      return 1;

  return 0;
}
/* The hash function for our hash table.  The value is always computed with
   hash_rtx when adding an element; this function just extracts the hash
   value from a cselib_val structure.  */

static unsigned int
get_value_hash (entry)
     const void *entry;
{
  const cselib_val *v = (const cselib_val *) entry;
  return v->value;
}
/* Return true if X contains a VALUE rtx.  If ONLY_USELESS is set, we
   only return true for values which point to a cselib_val whose value
   element has been set to zero, which implies the cselib_val will be
   removed.  */

int
references_value_p (x, only_useless)
     rtx x;
     int only_useless;
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (GET_CODE (x) == VALUE
      && (! only_useless || CSELIB_VAL_PTR (x)->locs == 0))
    return 1;

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && references_value_p (XEXP (x, i), only_useless))
	return 1;
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (references_value_p (XVECEXP (x, i, j), only_useless))
	    return 1;
    }

  return 0;
}
/* For all locations found in X, delete locations that reference useless
   values (i.e. values without any location).  Called through
   htab_traverse.  */

static int
discard_useless_locs (x, info)
     void **x;
     void *info ATTRIBUTE_UNUSED;
{
  cselib_val *v = (cselib_val *)*x;
  struct elt_loc_list **p = &v->locs;
  int had_locs = v->locs != 0;

  while (*p)
    {
      if (references_value_p ((*p)->loc, 1))
	unchain_one_elt_loc_list (p);
      else
	p = &(*p)->next;
    }

  if (had_locs && v->locs == 0)
    {
      n_useless_values++;
      values_became_useless = 1;
    }

  return 1;
}
/* If X is a value with no locations, remove it from the hashtable.  */

static int
discard_useless_values (x, info)
     void **x;
     void *info ATTRIBUTE_UNUSED;
{
  cselib_val *v = (cselib_val *)*x;

  if (v->locs == 0)
    {
      htab_clear_slot (hash_table, x);
      unchain_one_value (v);
      n_useless_values--;
    }

  return 1;
}
/* Clean out useless values (i.e. those which no longer have locations
   associated with them) from the hash table.  */

static void
remove_useless_values ()
{
  /* First pass: eliminate locations that reference the value.  That in
     turn can make more values useless.  */
  do
    {
      values_became_useless = 0;
      htab_traverse (hash_table, discard_useless_locs, 0);
    }
  while (values_became_useless);

  /* Second pass: actually remove the values.  */
  htab_traverse (hash_table, discard_useless_values, 0);

  if (n_useless_values != 0)
    abort ();
}
/* Return nonzero if we can prove that X and Y contain the same value, taking
   our gathered information into account.  */

int
rtx_equal_for_cselib_p (x, y)
     rtx x, y;
{
  enum rtx_code code;
  const char *fmt;
  int i;

  if (GET_CODE (x) == REG || GET_CODE (x) == MEM)
    {
      cselib_val *e = cselib_lookup (x, VOIDmode, 0);

      if (e)
        x = e->u.val_rtx;
    }

  if (GET_CODE (y) == REG || GET_CODE (y) == MEM)
    {
      cselib_val *e = cselib_lookup (y, VOIDmode, 0);

      if (e)
        y = e->u.val_rtx;
    }

  if (x == y)
    return 1;

  if (GET_CODE (x) == VALUE && GET_CODE (y) == VALUE)
    return CSELIB_VAL_PTR (x) == CSELIB_VAL_PTR (y);

  if (GET_CODE (x) == VALUE)
    {
      cselib_val *e = CSELIB_VAL_PTR (x);
      struct elt_loc_list *l;

      for (l = e->locs; l; l = l->next)
        {
          rtx t = l->loc;

          /* Avoid infinite recursion.  */
          if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
            continue;
          else if (rtx_equal_for_cselib_p (t, y))
            return 1;
        }

      return 0;
    }

  if (GET_CODE (y) == VALUE)
    {
      cselib_val *e = CSELIB_VAL_PTR (y);
      struct elt_loc_list *l;

      for (l = e->locs; l; l = l->next)
        {
          rtx t = l->loc;

          /* Avoid infinite recursion.  */
          if (GET_CODE (t) == REG || GET_CODE (t) == MEM)
            continue;
          else if (rtx_equal_for_cselib_p (x, t))
            return 1;
        }

      return 0;
    }

  if (GET_CODE (x) != GET_CODE (y) || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* This won't be handled correctly by the code below.  */
  if (GET_CODE (x) == LABEL_REF)
    return XEXP (x, 0) == XEXP (y, 0);

  code = GET_CODE (x);
  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return 0;
          break;

        case 'n':
        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return 0;
          break;

        case 'V':
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return 0;

          /* And the corresponding elements must match.  */
          for (j = 0; j < XVECLEN (x, i); j++)
            if (! rtx_equal_for_cselib_p (XVECEXP (x, i, j),
                                          XVECEXP (y, i, j)))
              return 0;
          break;

        case 'e':
          if (! rtx_equal_for_cselib_p (XEXP (x, i), XEXP (y, i)))
            return 0;
          break;

        case 'S':
        case 's':
          if (strcmp (XSTR (x, i), XSTR (y, i)))
            return 0;
          break;

        case 'u':
          /* These are just backpointers, so they don't matter.  */
          break;

        case '0':
        case 't':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's,
             except for within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }

  return 1;
}
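/* An illustrative use (an assumption, not from the original sources):
   after cselib has processed

       (set (reg:SI 100) (mem:SI (reg:SI 101)))

   the destination register and the source memory location share one
   cselib_val, so the following comparison would succeed:  */
#if 0
  if (rtx_equal_for_cselib_p (gen_rtx_REG (SImode, 100),
                              gen_rtx_MEM (SImode,
                                           gen_rtx_REG (SImode, 101))))
    /* Known to contain the same value.  */ ;
#endif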
/* Hash an rtx.  Return 0 if we couldn't hash the rtx.
   For registers and memory locations, we look up their cselib_val structure
   and return its VALUE element.
   Possible reasons for return 0 are: the object is volatile, or we couldn't
   find a register or memory location in the table and CREATE is zero.  If
   CREATE is nonzero, table elts are created for regs and mem.
   MODE is used in hashing for CONST_INTs only;
   otherwise the mode of X is used.  */

static unsigned int
hash_rtx (x, mode, create)
     rtx x;
     enum machine_mode mode;
     int create;
{
  cselib_val *e;
  int i, j;
  enum rtx_code code;
  const char *fmt;
  unsigned int hash = 0;

  /* repeat is used to turn tail-recursion into iteration.  */
 repeat:
  code = GET_CODE (x);
  hash += (unsigned) code + (unsigned) GET_MODE (x);

  switch (code)
    {
    case MEM:
    case REG:
      e = cselib_lookup (x, GET_MODE (x), create);
      if (! e)
        return 0;

      return e->value;

    case CONST_INT:
      hash += ((unsigned) CONST_INT << 7) + (unsigned) mode + INTVAL (x);
      return hash ? hash : CONST_INT;

    case CONST_DOUBLE:
      /* This is like the general case, except that it only counts
         the integers representing the constant.  */
      hash += (unsigned) code + (unsigned) GET_MODE (x);
      if (GET_MODE (x) != VOIDmode)
        for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
          hash += XWINT (x, i);
      else
        hash += ((unsigned) CONST_DOUBLE_LOW (x)
                 + (unsigned) CONST_DOUBLE_HIGH (x));
      return hash ? hash : CONST_DOUBLE;

      /* Assume there is only one rtx object for any given label.  */
    case LABEL_REF:
      hash += ((unsigned) LABEL_REF << 7) + (unsigned long) XEXP (x, 0);
      return hash ? hash : LABEL_REF;

    case SYMBOL_REF:
      hash += ((unsigned) SYMBOL_REF << 7) + (unsigned long) XSTR (x, 0);
      return hash ? hash : SYMBOL_REF;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PC:
    case CC0:
    case CALL:
    case UNSPEC_VOLATILE:
      return 0;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 0;

      break;

    default:
      break;
    }

  i = GET_RTX_LENGTH (code) - 1;
  fmt = GET_RTX_FORMAT (code);
  for (; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          rtx tem = XEXP (x, i);
          unsigned int tem_hash;

          /* If we are about to do the last recursive call
             needed at this level, change it into iteration.
             This function is called enough to be worth it.  */
          if (i == 0)
            {
              x = tem;
              goto repeat;
            }

          tem_hash = hash_rtx (tem, 0, create);
          if (tem_hash == 0)
            return 0;

          hash += tem_hash;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          {
            unsigned int tem_hash = hash_rtx (XVECEXP (x, i, j), 0, create);

            if (tem_hash == 0)
              return 0;

            hash += tem_hash;
          }
      else if (fmt[i] == 's')
        {
          const unsigned char *p = (const unsigned char *) XSTR (x, i);

          if (p)
            while (*p)
              hash += *p++;
        }
      else if (fmt[i] == 'i')
        hash += XINT (x, i);
      else if (fmt[i] == '0' || fmt[i] == 't')
        /* Unused.  */;
      else
        abort ();
    }

  return hash ? hash : 1 + GET_CODE (x);
}
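/* A worked example of the hashing scheme above (illustrative, not from
   the original sources): for (const_int 4) hashed with MODE == SImode,
   the code first adds (unsigned) CONST_INT + (unsigned) VOIDmode from the
   common header, then ((unsigned) CONST_INT << 7) + (unsigned) SImode + 4
   in the CONST_INT case.  The `hash ? hash : CONST_INT' idiom keeps the
   result nonzero, since a return value of 0 is reserved to mean the rtx
   could not be hashed.  */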
/* Create a new value structure for VALUE and initialize it.  The mode of the
   value is MODE.  */

static cselib_val *
new_cselib_val (value, mode)
     unsigned int value;
     enum machine_mode mode;
{
  cselib_val *e = empty_vals;

  if (e)
    empty_vals = e->u.next_free;
  else
    e = (cselib_val *) obstack_alloc (&cselib_obstack, sizeof (cselib_val));

  e->value = value;
  e->u.val_rtx = gen_rtx_VALUE (mode);
  CSELIB_VAL_PTR (e->u.val_rtx) = e;
  e->addr_list = 0;
  e->locs = 0;

  return e;
}
/* ADDR_ELT is a value that is used as address.  MEM_ELT is the value that
   contains the data at this address.  X is a MEM that represents the
   value.  Update the two value structures to represent this situation.  */

static void
add_mem_for_addr (addr_elt, mem_elt, x)
     cselib_val *addr_elt, *mem_elt;
     rtx x;
{
  rtx new;
  struct elt_loc_list *l;

  /* Avoid duplicates.  */
  for (l = mem_elt->locs; l; l = l->next)
    if (GET_CODE (l->loc) == MEM
        && CSELIB_VAL_PTR (XEXP (l->loc, 0)) == addr_elt)
      return;

  new = gen_rtx_MEM (GET_MODE (x), addr_elt->u.val_rtx);
  MEM_COPY_ATTRIBUTES (new, x);

  addr_elt->addr_list = new_elt_list (addr_elt->addr_list, mem_elt);
  mem_elt->locs = new_elt_loc_list (mem_elt->locs, new);
}
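/* An illustrative example (not from the original sources): if X is
   (mem:SI (reg:SI 100)) and ADDR_ELT is the value currently associated
   with (reg:SI 100), the location recorded for MEM_ELT is a MEM over
   ADDR_ELT's VALUE rtx, i.e. (mem:SI (value:SI ...)), rather than over
   the register itself.  The recorded location thus keeps describing the
   same address even after (reg:SI 100) is reused for something else.  */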
/* Subroutine of cselib_lookup.  Return a value for X, which is a MEM rtx.
   If CREATE, make a new one if we haven't seen it before.  */

static cselib_val *
cselib_lookup_mem (x, create)
     rtx x;
     int create;
{
  void **slot;
  cselib_val *addr;
  cselib_val *mem_elt;
  struct elt_list *l;

  if (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode
      || (FLOAT_MODE_P (GET_MODE (x)) && flag_float_store))
    return 0;

  /* Look up the value for the address.  */
  addr = cselib_lookup (XEXP (x, 0), GET_MODE (x), create);
  if (! addr)
    return 0;

  /* Find a value that describes a value of our mode at that address.  */
  for (l = addr->addr_list; l; l = l->next)
    if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
      return l->elt;

  if (! create)
    return 0;

  mem_elt = new_cselib_val (++next_unknown_value, GET_MODE (x));
  add_mem_for_addr (addr, mem_elt, x);
  slot = htab_find_slot_with_hash (hash_table, x, mem_elt->value, INSERT);
  *slot = mem_elt;
  return mem_elt;
}
/* Walk rtx X and replace all occurrences of REG and MEM subexpressions
   with VALUE expressions.  This way, it becomes independent of changes
   to registers and memory.
   X isn't actually modified; if modifications are needed, new rtl is
   allocated.  However, the return value can share rtl with X.  */

static rtx
cselib_subst_to_values (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  cselib_val *e;
  struct elt_list *l;
  rtx copy = x;
  int i;

  switch (code)
    {
    case REG:
      for (l = REG_VALUES (REGNO (x)); l; l = l->next)
        if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
          return l->elt->u.val_rtx;

      abort ();

    case MEM:
      e = cselib_lookup_mem (x, 0);
      if (! e)
        abort ();
      return e->u.val_rtx;

      /* CONST_DOUBLEs must be special-cased here so that we won't try to
         look up the CONST_DOUBLE_MEM inside.  */
    case CONST_DOUBLE:
    case CONST_INT:
      return x;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          rtx t = cselib_subst_to_values (XEXP (x, i));

          if (t != XEXP (x, i) && x == copy)
            copy = shallow_copy_rtx (x);

          XEXP (copy, i) = t;
        }
      else if (fmt[i] == 'E')
        {
          int j, k;

          for (j = 0; j < XVECLEN (x, i); j++)
            {
              rtx t = cselib_subst_to_values (XVECEXP (x, i, j));

              if (t != XVECEXP (x, i, j) && XVEC (x, i) == XVEC (copy, i))
                {
                  if (x == copy)
                    copy = shallow_copy_rtx (x);

                  XVEC (copy, i) = rtvec_alloc (XVECLEN (x, i));
                  for (k = 0; k < j; k++)
                    XVECEXP (copy, i, k) = XVECEXP (x, i, k);
                }

              XVECEXP (copy, i, j) = t;
            }
        }
    }

  return copy;
}
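/* An illustrative example (not from the original sources): if
   (reg:SI 100) and (reg:SI 101) currently map to values V1 and V2, then
   passing (plus:SI (reg:SI 100) (reg:SI 101)) to cselib_subst_to_values
   yields (plus:SI V1 V2), where V1 and V2 stand for the registers' VALUE
   rtx'en.  The argument itself is untouched; shallow copies are made
   only on the path down to modified operands.  */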
/* Look up the rtl expression X in our tables and return the value it has.
   If CREATE is zero, we return NULL if we don't know the value.  Otherwise,
   we create a new one if possible, using mode MODE if X doesn't have a mode
   (i.e. because it's a constant).  */

cselib_val *
cselib_lookup (x, mode, create)
     rtx x;
     enum machine_mode mode;
     int create;
{
  void **slot;
  cselib_val *e;
  unsigned int hashval;

  if (GET_MODE (x) != VOIDmode)
    mode = GET_MODE (x);

  if (GET_CODE (x) == VALUE)
    return CSELIB_VAL_PTR (x);

  if (GET_CODE (x) == REG)
    {
      struct elt_list *l;
      unsigned int i = REGNO (x);

      for (l = REG_VALUES (i); l; l = l->next)
        if (mode == GET_MODE (l->elt->u.val_rtx))
          return l->elt;

      if (! create)
        return 0;

      e = new_cselib_val (++next_unknown_value, GET_MODE (x));
      e->locs = new_elt_loc_list (e->locs, x);
      REG_VALUES (i) = new_elt_list (REG_VALUES (i), e);
      slot = htab_find_slot_with_hash (hash_table, x, e->value, INSERT);
      *slot = e;
      return e;
    }

  if (GET_CODE (x) == MEM)
    return cselib_lookup_mem (x, create);

  hashval = hash_rtx (x, mode, create);
  /* Can't even create if hashing is not possible.  */
  if (! hashval)
    return 0;

  slot = htab_find_slot_with_hash (hash_table, x, hashval,
                                   create ? INSERT : NO_INSERT);
  if (slot == 0)
    return 0;

  e = (cselib_val *) *slot;
  if (e)
    return e;

  e = new_cselib_val (hashval, mode);

  /* We have to fill the slot before calling cselib_subst_to_values:
     the hash table is inconsistent until we do so, and
     cselib_subst_to_values will need to do lookups.  */
  *slot = (void *) e;
  e->locs = new_elt_loc_list (e->locs, cselib_subst_to_values (x));
  return e;
}
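/* A minimal client sketch (an assumption, not part of the original
   sources): decide whether two rtl expressions are known to contain the
   same value.  With CREATE == 0, cselib_lookup never modifies the table
   and returns 0 for unknown expressions.  */
#if 0
static int
exprs_share_value_p (x, y)
     rtx x, y;
{
  cselib_val *ex = cselib_lookup (x, GET_MODE (x), 0);
  cselib_val *ey = cselib_lookup (y, GET_MODE (y), 0);

  return ex != 0 && ex == ey;
}
#endif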
/* Invalidate any entries in reg_values that overlap REGNO.  This is called
   if REGNO is changing.  MODE is the mode of the assignment to REGNO, which
   is used to determine how many hard registers are being changed.  If MODE
   is VOIDmode, then only REGNO is being changed; this is used when
   invalidating call clobbered registers across a call.  */

static void
cselib_invalidate_regno (regno, mode)
     unsigned int regno;
     enum machine_mode mode;
{
  unsigned int endregno;
  unsigned int i;

  /* If we see pseudos after reload, something is _wrong_.  */
  if (reload_completed && regno >= FIRST_PSEUDO_REGISTER
      && reg_renumber[regno] >= 0)
    abort ();

  /* Determine the range of registers that must be invalidated.  For
     pseudos, only REGNO is affected.  For hard regs, we must take MODE
     into account, and we must also invalidate lower register numbers
     if they contain values that overlap REGNO.  */
  endregno = regno + 1;
  if (regno < FIRST_PSEUDO_REGISTER && mode != VOIDmode)
    endregno = regno + HARD_REGNO_NREGS (regno, mode);

  for (i = 0; i < endregno; i++)
    {
      struct elt_list **l = &REG_VALUES (i);

      /* Go through all known values for this reg; if it overlaps the range
         we're invalidating, remove the value.  */
      while (*l)
        {
          cselib_val *v = (*l)->elt;
          struct elt_loc_list **p;
          unsigned int this_last = i;

          if (i < FIRST_PSEUDO_REGISTER)
            this_last += HARD_REGNO_NREGS (i, GET_MODE (v->u.val_rtx)) - 1;

          if (this_last < regno)
            {
              l = &(*l)->next;
              continue;
            }

          /* We have an overlap.  */
          unchain_one_elt_list (l);

          /* Now, we clear the mapping from value to reg.  It must exist, so
             this code will crash intentionally if it doesn't.  */
          for (p = &v->locs; ; p = &(*p)->next)
            {
              rtx x = (*p)->loc;

              if (GET_CODE (x) == REG && REGNO (x) == i)
                {
                  unchain_one_elt_loc_list (p);
                  break;
                }
            }

          if (v->locs == 0)
            n_useless_values++;
        }
    }
}
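/* A worked example (illustrative, not from the original sources): on a
   target where HARD_REGNO_NREGS (0, DImode) == 2, a value living in
   (reg:DI 0) occupies hard registers 0 and 1.  An assignment to regno 1
   must discard that DImode value as well, which is why the loop above
   starts at register 0 and extends THIS_LAST by HARD_REGNO_NREGS - 1 for
   hard registers.  */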
/* The memory at address MEM_BASE is being changed.
   Return whether this change will invalidate VAL.  */

static int
cselib_mem_conflict_p (mem_base, val)
     rtx mem_base;
     rtx val;
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  code = GET_CODE (val);
  switch (code)
    {
      /* Get rid of a few simple cases quickly.  */
    case REG:
    case PC:
    case CC0:
    case SCRATCH:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case MEM:
      if (GET_MODE (mem_base) == BLKmode
          || GET_MODE (val) == BLKmode
          || anti_dependence (val, mem_base))
        return 1;

      /* The address may contain nested MEMs.  */
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (cselib_mem_conflict_p (mem_base, XEXP (val, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (val, i); j++)
          if (cselib_mem_conflict_p (mem_base, XVECEXP (val, i, j)))
            return 1;
    }

  return 0;
}
/* For the value found in SLOT, walk its locations to determine if any overlap
   INFO (which is a MEM rtx).  */

static int
cselib_invalidate_mem_1 (slot, info)
     void **slot;
     void *info;
{
  cselib_val *v = (cselib_val *) *slot;
  rtx mem_rtx = (rtx) info;
  struct elt_loc_list **p = &v->locs;
  int had_locs = v->locs != 0;

  while (*p)
    {
      rtx x = (*p)->loc;
      cselib_val *addr;
      struct elt_list **mem_chain;

      /* MEMs may occur in locations only at the top level; below
         that every MEM or REG is substituted by its VALUE.  */
      if (GET_CODE (x) != MEM
          || ! cselib_mem_conflict_p (mem_rtx, x))
        {
          p = &(*p)->next;
          continue;
        }

      /* This one overlaps.  */
      /* We must have a mapping from this MEM's address to the
         value (E).  Remove that, too.  */
      addr = cselib_lookup (XEXP (x, 0), VOIDmode, 0);
      mem_chain = &addr->addr_list;
      for (;;)
        {
          if ((*mem_chain)->elt == v)
            {
              unchain_one_elt_list (mem_chain);
              break;
            }

          mem_chain = &(*mem_chain)->next;
        }

      unchain_one_elt_loc_list (p);
    }

  if (had_locs && v->locs == 0)
    n_useless_values++;

  return 1;
}
/* Invalidate any locations in the table which are changed because of a
   store to MEM_RTX.  If this is called because of a non-const call
   instruction, MEM_RTX is (mem:BLK const0_rtx).  */

static void
cselib_invalidate_mem (mem_rtx)
     rtx mem_rtx;
{
  htab_traverse (hash_table, cselib_invalidate_mem_1, mem_rtx);
}
/* Invalidate DEST, which is being assigned to or clobbered.  The second and
   the third parameter exist so that this function can be passed to
   note_stores; they are ignored.  */

static void
cselib_invalidate_rtx (dest, ignore, data)
     rtx dest;
     rtx ignore ATTRIBUTE_UNUSED;
     void *data ATTRIBUTE_UNUSED;
{
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SIGN_EXTRACT
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SUBREG)
    dest = XEXP (dest, 0);

  if (GET_CODE (dest) == REG)
    cselib_invalidate_regno (REGNO (dest), GET_MODE (dest));
  else if (GET_CODE (dest) == MEM)
    cselib_invalidate_mem (dest);

  /* Some machines don't define AUTO_INC_DEC, but they still use push
     instructions.  We need to catch that case here in order to
     invalidate the stack pointer correctly.  Note that invalidating
     the stack pointer is different from invalidating DEST.  */
  if (push_operand (dest, GET_MODE (dest)))
    cselib_invalidate_rtx (stack_pointer_rtx, NULL_RTX, NULL);
}
/* Record the result of a SET instruction.  DEST is being set; the source
   contains the value described by SRC_ELT.  If DEST is a MEM, DEST_ADDR_ELT
   describes its address.  */

static void
cselib_record_set (dest, src_elt, dest_addr_elt)
     rtx dest;
     cselib_val *src_elt, *dest_addr_elt;
{
  int dreg = GET_CODE (dest) == REG ? (int) REGNO (dest) : -1;

  if (src_elt == 0 || side_effects_p (dest))
    return;

  if (dreg >= 0)
    {
      REG_VALUES (dreg) = new_elt_list (REG_VALUES (dreg), src_elt);
      if (src_elt->locs == 0)
        n_useless_values--;
      src_elt->locs = new_elt_loc_list (src_elt->locs, dest);
    }
  else if (GET_CODE (dest) == MEM && dest_addr_elt != 0)
    {
      if (src_elt->locs == 0)
        n_useless_values--;
      add_mem_for_addr (dest_addr_elt, src_elt, dest);
    }
}
/* Describe a single set that is part of an insn.  */

struct set
{
  rtx src;
  rtx dest;
  cselib_val *src_elt;
  cselib_val *dest_addr_elt;
};

/* There is no good way to determine how many elements there can be
   in a PARALLEL.  Since it's fairly cheap, use a really large number.  */
#define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
/* Record the effects of any sets in INSN.  */

static void
cselib_record_sets (insn)
     rtx insn;
{
  int n_sets = 0;
  int i;
  struct set sets[MAX_SETS];
  rtx body = PATTERN (insn);

  /* Find all sets.  */
  if (GET_CODE (body) == SET)
    {
      sets[0].src = SET_SRC (body);
      sets[0].dest = SET_DEST (body);
      n_sets = 1;
    }
  else if (GET_CODE (body) == PARALLEL)
    {
      /* Look through the PARALLEL and record the values being
         set, if possible.  Also handle any CLOBBERs.  */
      for (i = XVECLEN (body, 0) - 1; i >= 0; --i)
        {
          rtx x = XVECEXP (body, 0, i);

          if (GET_CODE (x) == SET)
            {
              sets[n_sets].src = SET_SRC (x);
              sets[n_sets].dest = SET_DEST (x);
              n_sets++;
            }
        }
    }

  /* Look up the values that are read.  Do this before invalidating the
     locations that are written.  */
  for (i = 0; i < n_sets; i++)
    {
      sets[i].src_elt = cselib_lookup (sets[i].src, GET_MODE (sets[i].dest),
                                       1);
      if (GET_CODE (sets[i].dest) == MEM)
        sets[i].dest_addr_elt = cselib_lookup (XEXP (sets[i].dest, 0), Pmode,
                                               1);
      else
        sets[i].dest_addr_elt = 0;
    }

  /* Invalidate all locations written by this insn.  Note that the elts we
     looked up in the previous loop aren't affected, just some of their
     locations may go away.  */
  note_stores (body, cselib_invalidate_rtx, NULL);

  /* Now enter the equivalences in our tables.  */
  for (i = 0; i < n_sets; i++)
    cselib_record_set (sets[i].dest, sets[i].src_elt, sets[i].dest_addr_elt);
}
/* Record the effects of INSN.  */

void
cselib_process_insn (insn)
     rtx insn;
{
  int i;
  rtx x;

  cselib_current_insn = insn;

  /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp.  */
  if (GET_CODE (insn) == CODE_LABEL
      || (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
      || (GET_CODE (insn) == INSN
          && GET_CODE (PATTERN (insn)) == ASM_OPERANDS
          && MEM_VOLATILE_P (PATTERN (insn))))
    {
      clear_table ();
      return;
    }

  if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
    {
      cselib_current_insn = 0;
      return;
    }

  /* If this is a call instruction, forget anything stored in a
     call clobbered register, or, if this is not a const call, in
     memory.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (call_used_regs[i])
          cselib_invalidate_regno (i, VOIDmode);

      if (! CONST_CALL_P (insn))
        cselib_invalidate_mem (callmem);
    }

  cselib_record_sets (insn);

#ifdef AUTO_INC_DEC
  /* Clobber any registers which appear in REG_INC notes.  We
     could keep track of the changes to their values, but it is
     unlikely to help.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    if (REG_NOTE_KIND (x) == REG_INC)
      cselib_invalidate_rtx (XEXP (x, 0), NULL_RTX, NULL);
#endif

  /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
     after we have processed the insn.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (x = CALL_INSN_FUNCTION_USAGE (insn); x; x = XEXP (x, 1))
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
        cselib_invalidate_rtx (XEXP (XEXP (x, 0), 0), NULL_RTX, NULL);

  cselib_current_insn = 0;

  if (n_useless_values > MAX_USELESS_VALUES)
    remove_useless_values ();
}
/* Make sure our varrays are big enough.  Not called from any cselib routines;
   it must be called by the user if it allocated new registers.  */

void
cselib_update_varray_sizes ()
{
  unsigned int nregs = max_reg_num ();

  if (nregs == cselib_nregs)
    return;

  cselib_nregs = nregs;
  VARRAY_GROW (reg_values, nregs);
}
/* Initialize cselib for one pass.  The caller must also call
   init_alias_analysis.  */

void
cselib_init ()
{
  /* These are only created once.  */
  if (! callmem)
    {
      extern struct obstack permanent_obstack;

      gcc_obstack_init (&cselib_obstack);
      cselib_startobj = obstack_alloc (&cselib_obstack, 0);

      push_obstacks (&permanent_obstack, &permanent_obstack);
      callmem = gen_rtx_MEM (BLKmode, const0_rtx);
      pop_obstacks ();
      ggc_add_rtx_root (&callmem, 1);
    }

  cselib_nregs = max_reg_num ();
  VARRAY_ELT_LIST_INIT (reg_values, cselib_nregs, "reg_values");
  hash_table = htab_create (31, get_value_hash, entry_and_rtx_equal_p, NULL);
  clear_table ();
}

/* Called when the current user is done with cselib.  */

void
cselib_finish ()
{
  clear_table ();
  htab_delete (hash_table);
}
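/* A sketch of the expected driver loop (an assumption pieced together
   from the comments above, not code from this file): a pass initializes
   alias analysis and cselib, feeds every insn in order, and tears both
   down when done.  */
#if 0
static void
example_cselib_client (f)
     rtx f;
{
  rtx insn;

  init_alias_analysis ();
  cselib_init ();

  for (insn = f; insn; insn = NEXT_INSN (insn))
    cselib_process_insn (insn);

  cselib_finish ();
  end_alias_analysis ();
}
#endif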