1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
47 virtual regs here because the simplify_*_operation routines are called
48 by integrate.c, which is called before virtual register instantiation.
50 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
51 a header file so that their definitions can be shared with the
52 simplification routines in simplify-rtx.c. Until then, do not
53 change these macros without also changing the copy in simplify-rtx.c. */
/* Nonzero if X is the frame pointer (or a virtual/arg pointer that will
   resolve to it), or such a pointer plus a CONST_INT offset.
   Fix: in the PLUS arm, the arg-pointer test used to compare (X) -- the
   PLUS rtx itself, which can never equal arg_pointer_rtx -- instead of
   the base operand XEXP (X, 0).  Compare the base, as every sibling
   test in the same arm does.  */
#define FIXED_BASE_PLUS_P(X)                                          \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx          \
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])      \
   || (X) == virtual_stack_vars_rtx                                   \
   || (X) == virtual_incoming_args_rtx                                \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT    \
       && (XEXP (X, 0) == frame_pointer_rtx                           \
           || XEXP (X, 0) == hard_frame_pointer_rtx                   \
           || (XEXP (X, 0) == arg_pointer_rtx                         \
               && fixed_regs[ARG_POINTER_REGNUM])                     \
           || XEXP (X, 0) == virtual_stack_vars_rtx                   \
           || XEXP (X, 0) == virtual_incoming_args_rtx))              \
   || GET_CODE (X) == ADDRESSOF)
69 /* Similar, but also allows reference to the stack pointer.
71 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
72 arg_pointer_rtx by itself is nonzero, because on at least one machine,
73 the i960, the arg pointer is zero when it is unused. */
/* Nonzero if X is a base register known to be nonzero (frame, stack,
   or the virtual registers that map onto them), or such a base plus a
   CONST_INT offset.  Deliberately excludes a bare arg_pointer_rtx: on
   at least one machine (the i960) the arg pointer is zero when unused.
   Fix: in the first PLUS arm, the arg-pointer test used to compare (X)
   -- the PLUS rtx itself, never equal to arg_pointer_rtx -- instead of
   the base operand XEXP (X, 0).  */
#define NONZERO_BASE_PLUS_P(X)                                        \
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx          \
   || (X) == virtual_stack_vars_rtx                                   \
   || (X) == virtual_incoming_args_rtx                                \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT    \
       && (XEXP (X, 0) == frame_pointer_rtx                           \
           || XEXP (X, 0) == hard_frame_pointer_rtx                   \
           || (XEXP (X, 0) == arg_pointer_rtx                         \
               && fixed_regs[ARG_POINTER_REGNUM])                     \
           || XEXP (X, 0) == virtual_stack_vars_rtx                   \
           || XEXP (X, 0) == virtual_incoming_args_rtx))              \
   || (X) == stack_pointer_rtx                                        \
   || (X) == virtual_stack_dynamic_rtx                                \
   || (X) == virtual_outgoing_args_rtx                                \
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT    \
       && (XEXP (X, 0) == stack_pointer_rtx                           \
           || XEXP (X, 0) == virtual_stack_dynamic_rtx                \
           || XEXP (X, 0) == virtual_outgoing_args_rtx))              \
   || GET_CODE (X) == ADDRESSOF)
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   twos complement signed value.
   Fix: parenthesize the macro argument inside the cast -- the old
   expansion ((HOST_WIDE_INT) low) mis-binds when the argument is an
   expression such as a conditional or an addition.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
102 static rtx simplify_plus_minus
PARAMS ((enum rtx_code
,
103 enum machine_mode
, rtx
, rtx
));
104 static void check_fold_consts
PARAMS ((PTR
));
105 static int entry_and_rtx_equal_p
PARAMS ((const void *, const void *));
106 static unsigned int get_value_hash
PARAMS ((const void *));
107 static struct elt_list
*new_elt_list
PARAMS ((struct elt_list
*,
109 static struct elt_loc_list
*new_elt_loc_list
PARAMS ((struct elt_loc_list
*,
111 static void unchain_one_value
PARAMS ((cselib_val
*));
112 static void unchain_one_elt_list
PARAMS ((struct elt_list
**));
113 static void unchain_one_elt_loc_list
PARAMS ((struct elt_loc_list
**));
114 static void clear_table
PARAMS ((void));
115 static int discard_useless_locs
PARAMS ((void **, void *));
116 static int discard_useless_values
PARAMS ((void **, void *));
117 static void remove_useless_values
PARAMS ((void));
118 static unsigned int hash_rtx
PARAMS ((rtx
, enum machine_mode
, int));
119 static cselib_val
*new_cselib_val
PARAMS ((unsigned int,
121 static void add_mem_for_addr
PARAMS ((cselib_val
*, cselib_val
*,
123 static cselib_val
*cselib_lookup_mem
PARAMS ((rtx
, int));
124 static rtx cselib_subst_to_values
PARAMS ((rtx
));
125 static void cselib_invalidate_regno
PARAMS ((unsigned int,
127 static int cselib_mem_conflict_p
PARAMS ((rtx
, rtx
));
128 static int cselib_invalidate_mem_1
PARAMS ((void **, void *));
129 static void cselib_invalidate_mem
PARAMS ((rtx
));
130 static void cselib_invalidate_rtx
PARAMS ((rtx
, rtx
, void *));
131 static void cselib_record_set
PARAMS ((rtx
, cselib_val
*,
133 static void cselib_record_sets
PARAMS ((rtx
));
135 /* There are three ways in which cselib can look up an rtx:
136 - for a REG, the reg_values table (which is indexed by regno) is used
137 - for a MEM, we recursively look up its address and then follow the
138 addr_list of that value
139 - for everything else, we compute a hash value and go through the hash
140 table. Since different rtx's can still have the same hash value,
141 this involves walking the table entries for a given value and comparing
142 the locations of the entries with the rtx we are looking up. */
144 /* A table that enables us to look up elts by their value. */
145 static htab_t hash_table
;
147 /* This is a global so we don't have to pass this through every function.
148 It is used in new_elt_loc_list to set SETTING_INSN. */
149 static rtx cselib_current_insn
;
151 /* Every new unknown value gets a unique number. */
152 static unsigned int next_unknown_value
;
154 /* The number of registers we had when the varrays were last resized. */
155 static unsigned int cselib_nregs
;
157 /* Count values without known locations. Whenever this grows too big, we
158 remove these useless values from the table. */
159 static int n_useless_values
;
161 /* Number of useless values before we remove them from the hash table. */
162 #define MAX_USELESS_VALUES 32
164 /* This table maps from register number to values. It does not contain
165 pointers to cselib_val structures, but rather elt_lists. The purpose is
166 to be able to refer to the same register in different modes. */
167 static varray_type reg_values
;
168 #define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))
170 /* We pass this to cselib_invalidate_mem to invalidate all of
171 memory for a non-const call instruction. */
174 /* Memory for our structures is allocated from this obstack. */
175 static struct obstack cselib_obstack
;
177 /* Used to quickly free all memory. */
178 static char *cselib_startobj
;
180 /* Caches for unused structures. */
181 static cselib_val
*empty_vals
;
182 static struct elt_list
*empty_elt_lists
;
183 static struct elt_loc_list
*empty_elt_loc_lists
;
185 /* Set by discard_useless_locs if it deleted the last location of any
187 static int values_became_useless
;
189 /* Make a binary operation by properly ordering the operands and
190 seeing if the expression folds. */
193 simplify_gen_binary (code
, mode
, op0
, op1
)
195 enum machine_mode mode
;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code
) == 'c'
202 && ((CONSTANT_P (op0
) && GET_CODE (op1
) != CONST_INT
)
203 || (GET_RTX_CLASS (GET_CODE (op0
)) == 'o'
204 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')
205 || (GET_CODE (op0
) == SUBREG
206 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0
))) == 'o'
207 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')))
208 tem
= op0
, op0
= op1
, op1
= tem
;
210 /* If this simplifies, do it. */
211 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
216 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
217 just form the operation. */
219 if (code
== PLUS
&& GET_CODE (op1
) == CONST_INT
220 && GET_MODE (op0
) != VOIDmode
)
221 return plus_constant (op0
, INTVAL (op1
));
222 else if (code
== MINUS
&& GET_CODE (op1
) == CONST_INT
223 && GET_MODE (op0
) != VOIDmode
)
224 return plus_constant (op0
, - INTVAL (op1
));
226 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
229 /* Try to simplify a unary operation CODE whose output mode is to be
230 MODE with input operand OP whose mode was originally OP_MODE.
231 Return zero if no simplification can be made. */
234 simplify_unary_operation (code
, mode
, op
, op_mode
)
236 enum machine_mode mode
;
238 enum machine_mode op_mode
;
240 unsigned int width
= GET_MODE_BITSIZE (mode
);
242 /* The order of these tests is critical so that, for example, we don't
243 check the wrong mode (input vs. output) for a conversion operation,
244 such as FIX. At some point, this should be simplified. */
246 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
248 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
249 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
251 HOST_WIDE_INT hv
, lv
;
254 if (GET_CODE (op
) == CONST_INT
)
255 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
257 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
259 #ifdef REAL_ARITHMETIC
260 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
265 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
266 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
267 d
+= (double) (unsigned HOST_WIDE_INT
) (~ lv
);
273 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
274 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
275 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
277 #endif /* REAL_ARITHMETIC */
278 d
= real_value_truncate (mode
, d
);
279 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
281 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
282 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
284 HOST_WIDE_INT hv
, lv
;
287 if (GET_CODE (op
) == CONST_INT
)
288 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
290 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
292 if (op_mode
== VOIDmode
)
294 /* We don't know how to interpret negative-looking numbers in
295 this case, so don't try to fold those. */
299 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
302 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
304 #ifdef REAL_ARITHMETIC
305 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
308 d
= (double) (unsigned HOST_WIDE_INT
) hv
;
309 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
310 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
311 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
312 #endif /* REAL_ARITHMETIC */
313 d
= real_value_truncate (mode
, d
);
314 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
318 if (GET_CODE (op
) == CONST_INT
319 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
321 register HOST_WIDE_INT arg0
= INTVAL (op
);
322 register HOST_WIDE_INT val
;
335 val
= (arg0
>= 0 ? arg0
: - arg0
);
339 /* Don't use ffs here. Instead, get low order bit and then its
340 number. If arg0 is zero, this will return 0, as desired. */
341 arg0
&= GET_MODE_MASK (mode
);
342 val
= exact_log2 (arg0
& (- arg0
)) + 1;
350 if (op_mode
== VOIDmode
)
352 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
354 /* If we were really extending the mode,
355 we would have to distinguish between zero-extension
356 and sign-extension. */
357 if (width
!= GET_MODE_BITSIZE (op_mode
))
361 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
362 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
368 if (op_mode
== VOIDmode
)
370 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
372 /* If we were really extending the mode,
373 we would have to distinguish between zero-extension
374 and sign-extension. */
375 if (width
!= GET_MODE_BITSIZE (op_mode
))
379 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
382 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
384 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
385 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
400 val
= trunc_int_for_mode (val
, mode
);
402 return GEN_INT (val
);
405 /* We can do some operations on integer CONST_DOUBLEs. Also allow
406 for a DImode operation on a CONST_INT. */
407 else if (GET_MODE (op
) == VOIDmode
&& width
<= HOST_BITS_PER_INT
* 2
408 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
410 unsigned HOST_WIDE_INT l1
, lv
;
411 HOST_WIDE_INT h1
, hv
;
413 if (GET_CODE (op
) == CONST_DOUBLE
)
414 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
416 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
426 neg_double (l1
, h1
, &lv
, &hv
);
431 neg_double (l1
, h1
, &lv
, &hv
);
439 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
441 lv
= exact_log2 (l1
& (-l1
)) + 1;
445 /* This is just a change-of-mode, so do nothing. */
450 if (op_mode
== VOIDmode
451 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
455 lv
= l1
& GET_MODE_MASK (op_mode
);
459 if (op_mode
== VOIDmode
460 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
464 lv
= l1
& GET_MODE_MASK (op_mode
);
465 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
466 && (lv
& ((HOST_WIDE_INT
) 1
467 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
468 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
470 hv
= HWI_SIGN_EXTEND (lv
);
481 return immed_double_const (lv
, hv
, mode
);
484 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
485 else if (GET_CODE (op
) == CONST_DOUBLE
486 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
492 if (setjmp (handler
))
493 /* There used to be a warning here, but that is inadvisable.
494 People may want to cause traps, and the natural way
495 to do it should not get a warning. */
498 set_float_handler (handler
);
500 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
505 d
= REAL_VALUE_NEGATE (d
);
509 if (REAL_VALUE_NEGATIVE (d
))
510 d
= REAL_VALUE_NEGATE (d
);
514 d
= real_value_truncate (mode
, d
);
518 /* All this does is change the mode. */
522 d
= REAL_VALUE_RNDZINT (d
);
526 d
= REAL_VALUE_UNSIGNED_RNDZINT (d
);
536 x
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
537 set_float_handler (NULL_PTR
);
541 else if (GET_CODE (op
) == CONST_DOUBLE
542 && GET_MODE_CLASS (GET_MODE (op
)) == MODE_FLOAT
543 && GET_MODE_CLASS (mode
) == MODE_INT
544 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
550 if (setjmp (handler
))
553 set_float_handler (handler
);
555 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
560 val
= REAL_VALUE_FIX (d
);
564 val
= REAL_VALUE_UNSIGNED_FIX (d
);
571 set_float_handler (NULL_PTR
);
573 val
= trunc_int_for_mode (val
, mode
);
575 return GEN_INT (val
);
578 /* This was formerly used only for non-IEEE float.
579 eggert@twinsun.com says it is safe for IEEE also. */
582 /* There are some simplifications we can do even if the operands
587 /* (not (not X)) == X. */
588 if (GET_CODE (op
) == NOT
)
591 /* (not (eq X Y)) == (ne X Y), etc. */
592 if (mode
== BImode
&& GET_RTX_CLASS (GET_CODE (op
)) == '<'
593 && can_reverse_comparison_p (op
, NULL_RTX
))
594 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (op
)),
595 op_mode
, XEXP (op
, 0), XEXP (op
, 1));
599 /* (neg (neg X)) == X. */
600 if (GET_CODE (op
) == NEG
)
605 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
606 becomes just the MINUS if its mode is MODE. This allows
607 folding switch statements on machines using casesi (such as
609 if (GET_CODE (op
) == TRUNCATE
610 && GET_MODE (XEXP (op
, 0)) == mode
611 && GET_CODE (XEXP (op
, 0)) == MINUS
612 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
613 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
616 #ifdef POINTERS_EXTEND_UNSIGNED
617 if (! POINTERS_EXTEND_UNSIGNED
618 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
620 return convert_memory_address (Pmode
, op
);
624 #ifdef POINTERS_EXTEND_UNSIGNED
626 if (POINTERS_EXTEND_UNSIGNED
627 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
629 return convert_memory_address (Pmode
, op
);
641 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
642 and OP1. Return 0 if no simplification is possible.
644 Don't use this for relational operations such as EQ or LT.
645 Use simplify_relational_operation instead. */
648 simplify_binary_operation (code
, mode
, op0
, op1
)
650 enum machine_mode mode
;
653 register HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
655 unsigned int width
= GET_MODE_BITSIZE (mode
);
658 /* Relational operations don't work here. We must know the mode
659 of the operands in order to do the comparison correctly.
660 Assuming a full word can give incorrect results.
661 Consider comparing 128 with -128 in QImode. */
663 if (GET_RTX_CLASS (code
) == '<')
666 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
667 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
668 && GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
669 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
671 REAL_VALUE_TYPE f0
, f1
, value
;
674 if (setjmp (handler
))
677 set_float_handler (handler
);
679 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
680 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
681 f0
= real_value_truncate (mode
, f0
);
682 f1
= real_value_truncate (mode
, f1
);
684 #ifdef REAL_ARITHMETIC
685 #ifndef REAL_INFINITY
686 if (code
== DIV
&& REAL_VALUES_EQUAL (f1
, dconst0
))
689 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
703 #ifndef REAL_INFINITY
710 value
= MIN (f0
, f1
);
713 value
= MAX (f0
, f1
);
720 value
= real_value_truncate (mode
, value
);
721 set_float_handler (NULL_PTR
);
722 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
724 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
726 /* We can fold some multi-word operations. */
727 if (GET_MODE_CLASS (mode
) == MODE_INT
728 && width
== HOST_BITS_PER_WIDE_INT
* 2
729 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
730 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
732 unsigned HOST_WIDE_INT l1
, l2
, lv
;
733 HOST_WIDE_INT h1
, h2
, hv
;
735 if (GET_CODE (op0
) == CONST_DOUBLE
)
736 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
738 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
740 if (GET_CODE (op1
) == CONST_DOUBLE
)
741 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
743 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
748 /* A - B == A + (-B). */
749 neg_double (l2
, h2
, &lv
, &hv
);
752 /* .. fall through ... */
755 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
759 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
762 case DIV
: case MOD
: case UDIV
: case UMOD
:
763 /* We'd need to include tree.h to do this and it doesn't seem worth
768 lv
= l1
& l2
, hv
= h1
& h2
;
772 lv
= l1
| l2
, hv
= h1
| h2
;
776 lv
= l1
^ l2
, hv
= h1
^ h2
;
782 && ((unsigned HOST_WIDE_INT
) l1
783 < (unsigned HOST_WIDE_INT
) l2
)))
792 && ((unsigned HOST_WIDE_INT
) l1
793 > (unsigned HOST_WIDE_INT
) l2
)))
800 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
802 && ((unsigned HOST_WIDE_INT
) l1
803 < (unsigned HOST_WIDE_INT
) l2
)))
810 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
812 && ((unsigned HOST_WIDE_INT
) l1
813 > (unsigned HOST_WIDE_INT
) l2
)))
819 case LSHIFTRT
: case ASHIFTRT
:
821 case ROTATE
: case ROTATERT
:
822 #ifdef SHIFT_COUNT_TRUNCATED
823 if (SHIFT_COUNT_TRUNCATED
)
824 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
827 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
830 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
831 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
833 else if (code
== ASHIFT
)
834 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
835 else if (code
== ROTATE
)
836 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
837 else /* code == ROTATERT */
838 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
845 return immed_double_const (lv
, hv
, mode
);
848 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
849 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
851 /* Even if we can't compute a constant result,
852 there are some cases worth simplifying. */
857 /* In IEEE floating point, x+0 is not the same as x. Similarly
858 for the other optimizations below. */
859 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
860 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
863 if (op1
== CONST0_RTX (mode
))
866 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
867 if (GET_CODE (op0
) == NEG
)
868 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
869 else if (GET_CODE (op1
) == NEG
)
870 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
872 /* Handle both-operands-constant cases. We can only add
873 CONST_INTs to constants since the sum of relocatable symbols
874 can't be handled by most assemblers. Don't add CONST_INT
875 to CONST_INT since overflow won't be computed properly if wider
876 than HOST_BITS_PER_WIDE_INT. */
878 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
879 && GET_CODE (op1
) == CONST_INT
)
880 return plus_constant (op0
, INTVAL (op1
));
881 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
882 && GET_CODE (op0
) == CONST_INT
)
883 return plus_constant (op1
, INTVAL (op0
));
885 /* See if this is something like X * C - X or vice versa or
886 if the multiplication is written as a shift. If so, we can
887 distribute and make a new multiply, shift, or maybe just
888 have X (if C is 2 in the example above). But don't make
889 real multiply if we didn't have one before. */
891 if (! FLOAT_MODE_P (mode
))
893 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
894 rtx lhs
= op0
, rhs
= op1
;
897 if (GET_CODE (lhs
) == NEG
)
898 coeff0
= -1, lhs
= XEXP (lhs
, 0);
899 else if (GET_CODE (lhs
) == MULT
900 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
902 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
905 else if (GET_CODE (lhs
) == ASHIFT
906 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
907 && INTVAL (XEXP (lhs
, 1)) >= 0
908 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
910 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
914 if (GET_CODE (rhs
) == NEG
)
915 coeff1
= -1, rhs
= XEXP (rhs
, 0);
916 else if (GET_CODE (rhs
) == MULT
917 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
919 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
922 else if (GET_CODE (rhs
) == ASHIFT
923 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
924 && INTVAL (XEXP (rhs
, 1)) >= 0
925 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
927 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
931 if (rtx_equal_p (lhs
, rhs
))
933 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
934 GEN_INT (coeff0
+ coeff1
));
935 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
939 /* If one of the operands is a PLUS or a MINUS, see if we can
940 simplify this by the associative law.
941 Don't use the associative law for floating point.
942 The inaccuracy makes it nonassociative,
943 and subtle programs can break if operations are associated. */
945 if (INTEGRAL_MODE_P (mode
)
946 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
947 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
948 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
954 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
955 using cc0, in which case we want to leave it as a COMPARE
956 so we can distinguish it from a register-register-copy.
958 In IEEE floating point, x-0 is not the same as x. */
960 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
961 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
962 && op1
== CONST0_RTX (mode
))
966 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
967 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
968 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
969 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
971 rtx xop00
= XEXP (op0
, 0);
972 rtx xop10
= XEXP (op1
, 0);
975 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
977 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
978 && GET_MODE (xop00
) == GET_MODE (xop10
)
979 && REGNO (xop00
) == REGNO (xop10
)
980 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
981 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
988 /* None of these optimizations can be done for IEEE
990 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
991 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
994 /* We can't assume x-x is 0 even with non-IEEE floating point,
995 but since it is zero except in very strange circumstances, we
996 will treat it as zero with -ffast-math. */
997 if (rtx_equal_p (op0
, op1
)
998 && ! side_effects_p (op0
)
999 && (! FLOAT_MODE_P (mode
) || flag_fast_math
))
1000 return CONST0_RTX (mode
);
1002 /* Change subtraction from zero into negation. */
1003 if (op0
== CONST0_RTX (mode
))
1004 return gen_rtx_NEG (mode
, op1
);
1006 /* (-1 - a) is ~a. */
1007 if (op0
== constm1_rtx
)
1008 return gen_rtx_NOT (mode
, op1
);
1010 /* Subtracting 0 has no effect. */
1011 if (op1
== CONST0_RTX (mode
))
1014 /* See if this is something like X * C - X or vice versa or
1015 if the multiplication is written as a shift. If so, we can
1016 distribute and make a new multiply, shift, or maybe just
1017 have X (if C is 2 in the example above). But don't make
1018 real multiply if we didn't have one before. */
1020 if (! FLOAT_MODE_P (mode
))
1022 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1023 rtx lhs
= op0
, rhs
= op1
;
1026 if (GET_CODE (lhs
) == NEG
)
1027 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1028 else if (GET_CODE (lhs
) == MULT
1029 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1031 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1034 else if (GET_CODE (lhs
) == ASHIFT
1035 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1036 && INTVAL (XEXP (lhs
, 1)) >= 0
1037 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1039 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1040 lhs
= XEXP (lhs
, 0);
1043 if (GET_CODE (rhs
) == NEG
)
1044 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1045 else if (GET_CODE (rhs
) == MULT
1046 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1048 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1051 else if (GET_CODE (rhs
) == ASHIFT
1052 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1053 && INTVAL (XEXP (rhs
, 1)) >= 0
1054 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1056 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1057 rhs
= XEXP (rhs
, 0);
1060 if (rtx_equal_p (lhs
, rhs
))
1062 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1063 GEN_INT (coeff0
- coeff1
));
1064 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1068 /* (a - (-b)) -> (a + b). */
1069 if (GET_CODE (op1
) == NEG
)
1070 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1072 /* If one of the operands is a PLUS or a MINUS, see if we can
1073 simplify this by the associative law.
1074 Don't use the associative law for floating point.
1075 The inaccuracy makes it nonassociative,
1076 and subtle programs can break if operations are associated. */
1078 if (INTEGRAL_MODE_P (mode
)
1079 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1080 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
1081 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1084 /* Don't let a relocatable value get a negative coeff. */
1085 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1086 return plus_constant (op0
, - INTVAL (op1
));
1088 /* (x - (x & y)) -> (x & ~y) */
1089 if (GET_CODE (op1
) == AND
)
1091 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1092 return simplify_gen_binary (AND
, mode
, op0
,
1093 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
1094 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1095 return simplify_gen_binary (AND
, mode
, op0
,
1096 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
1101 if (op1
== constm1_rtx
)
1103 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1105 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1108 /* In IEEE floating point, x*0 is not always 0. */
1109 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1110 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1111 && op1
== CONST0_RTX (mode
)
1112 && ! side_effects_p (op0
))
1115 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1116 However, ANSI says we can drop signals,
1117 so we can do this anyway. */
1118 if (op1
== CONST1_RTX (mode
))
1121 /* Convert multiply by constant power of two into shift unless
1122 we are still generating RTL. This test is a kludge. */
1123 if (GET_CODE (op1
) == CONST_INT
1124 && (val
= exact_log2 (INTVAL (op1
))) >= 0
1125 /* If the mode is larger than the host word size, and the
1126 uppermost bit is set, then this isn't a power of two due
1127 to implicit sign extension. */
1128 && (width
<= HOST_BITS_PER_WIDE_INT
1129 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1130 && ! rtx_equal_function_value_matters
)
1131 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1133 if (GET_CODE (op1
) == CONST_DOUBLE
1134 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
)
1138 int op1is2
, op1ism1
;
1140 if (setjmp (handler
))
1143 set_float_handler (handler
);
1144 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1145 op1is2
= REAL_VALUES_EQUAL (d
, dconst2
);
1146 op1ism1
= REAL_VALUES_EQUAL (d
, dconstm1
);
1147 set_float_handler (NULL_PTR
);
1149 /* x*2 is x+x and x*(-1) is -x */
1150 if (op1is2
&& GET_MODE (op0
) == mode
)
1151 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1153 else if (op1ism1
&& GET_MODE (op0
) == mode
)
1154 return gen_rtx_NEG (mode
, op0
);
1159 if (op1
== const0_rtx
)
1161 if (GET_CODE (op1
) == CONST_INT
1162 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1164 if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1166 /* A | (~A) -> -1 */
1167 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1168 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1169 && ! side_effects_p (op0
)
1170 && GET_MODE_CLASS (mode
) != MODE_CC
)
1175 if (op1
== const0_rtx
)
1177 if (GET_CODE (op1
) == CONST_INT
1178 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1179 return gen_rtx_NOT (mode
, op0
);
1180 if (op0
== op1
&& ! side_effects_p (op0
)
1181 && GET_MODE_CLASS (mode
) != MODE_CC
)
1186 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1188 if (GET_CODE (op1
) == CONST_INT
1189 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1191 if (op0
== op1
&& ! side_effects_p (op0
)
1192 && GET_MODE_CLASS (mode
) != MODE_CC
)
1195 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1196 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1197 && ! side_effects_p (op0
)
1198 && GET_MODE_CLASS (mode
) != MODE_CC
)
1203 /* Convert divide by power of two into shift (divide by 1 handled
1205 if (GET_CODE (op1
) == CONST_INT
1206 && (arg1
= exact_log2 (INTVAL (op1
))) > 0)
1207 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1209 /* ... fall through ... */
1212 if (op1
== CONST1_RTX (mode
))
1215 /* In IEEE floating point, 0/x is not always 0. */
1216 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1217 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1218 && op0
== CONST0_RTX (mode
)
1219 && ! side_effects_p (op1
))
1222 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1223 /* Change division by a constant into multiplication. Only do
1224 this with -ffast-math until an expert says it is safe in
1226 else if (GET_CODE (op1
) == CONST_DOUBLE
1227 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
1228 && op1
!= CONST0_RTX (mode
)
1232 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1234 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1236 #if defined (REAL_ARITHMETIC)
1237 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1238 return gen_rtx_MULT (mode
, op0
,
1239 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1242 gen_rtx_MULT (mode
, op0
,
1243 CONST_DOUBLE_FROM_REAL_VALUE (1./d
, mode
));
1251 /* Handle modulus by power of two (mod with 1 handled below). */
1252 if (GET_CODE (op1
) == CONST_INT
1253 && exact_log2 (INTVAL (op1
)) > 0)
1254 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1256 /* ... fall through ... */
1259 if ((op0
== const0_rtx
|| op1
== const1_rtx
)
1260 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1266 /* Rotating ~0 always results in ~0. */
1267 if (GET_CODE (op0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1268 && (unsigned HOST_WIDE_INT
) INTVAL (op0
) == GET_MODE_MASK (mode
)
1269 && ! side_effects_p (op1
))
1272 /* ... fall through ... */
1277 if (op1
== const0_rtx
)
1279 if (op0
== const0_rtx
&& ! side_effects_p (op1
))
1284 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1285 && INTVAL (op1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1286 && ! side_effects_p (op0
))
1288 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1293 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1294 && ((unsigned HOST_WIDE_INT
) INTVAL (op1
)
1295 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1296 && ! side_effects_p (op0
))
1298 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1303 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1305 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1310 if (op1
== constm1_rtx
&& ! side_effects_p (op0
))
1312 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1323 /* Get the integer argument values in two forms:
1324 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1326 arg0
= INTVAL (op0
);
1327 arg1
= INTVAL (op1
);
1329 if (width
< HOST_BITS_PER_WIDE_INT
)
1331 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1332 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1335 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1336 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1339 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1340 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1348 /* Compute the value of the arithmetic. */
1353 val
= arg0s
+ arg1s
;
1357 val
= arg0s
- arg1s
;
1361 val
= arg0s
* arg1s
;
1367 val
= arg0s
/ arg1s
;
1373 val
= arg0s
% arg1s
;
1379 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1385 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1401 /* If shift count is undefined, don't fold it; let the machine do
1402 what it wants. But truncate it if the machine will do that. */
1406 #ifdef SHIFT_COUNT_TRUNCATED
1407 if (SHIFT_COUNT_TRUNCATED
)
1411 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1418 #ifdef SHIFT_COUNT_TRUNCATED
1419 if (SHIFT_COUNT_TRUNCATED
)
1423 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1430 #ifdef SHIFT_COUNT_TRUNCATED
1431 if (SHIFT_COUNT_TRUNCATED
)
1435 val
= arg0s
>> arg1
;
1437 /* Bootstrap compiler may not have sign extended the right shift.
1438 Manually extend the sign to insure bootstrap cc matches gcc. */
1439 if (arg0s
< 0 && arg1
> 0)
1440 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1449 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1450 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1458 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1459 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1463 /* Do nothing here. */
1467 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1471 val
= ((unsigned HOST_WIDE_INT
) arg0
1472 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1476 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1480 val
= ((unsigned HOST_WIDE_INT
) arg0
1481 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1488 val
= trunc_int_for_mode (val
, mode
);
1490 return GEN_INT (val
);
1493 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1496 Rather than test for specific case, we do this by a brute-force method
1497 and do all possible simplifications until no more changes occur. Then
1498 we rebuild the operation. */
1501 simplify_plus_minus (code
, mode
, op0
, op1
)
1503 enum machine_mode mode
;
1509 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
= 0;
1510 int first
= 1, negate
= 0, changed
;
1513 bzero ((char *) ops
, sizeof ops
);
1515 /* Set up the two operands and then expand them until nothing has been
1516 changed. If we run out of room in our array, give up; this should
1517 almost never happen. */
1519 ops
[0] = op0
, ops
[1] = op1
, negs
[0] = 0, negs
[1] = (code
== MINUS
);
1526 for (i
= 0; i
< n_ops
; i
++)
1527 switch (GET_CODE (ops
[i
]))
1534 ops
[n_ops
] = XEXP (ops
[i
], 1);
1535 negs
[n_ops
++] = GET_CODE (ops
[i
]) == MINUS
? !negs
[i
] : negs
[i
];
1536 ops
[i
] = XEXP (ops
[i
], 0);
1542 ops
[i
] = XEXP (ops
[i
], 0);
1543 negs
[i
] = ! negs
[i
];
1548 ops
[i
] = XEXP (ops
[i
], 0);
1554 /* ~a -> (-a - 1) */
1557 ops
[n_ops
] = constm1_rtx
;
1558 negs
[n_ops
++] = negs
[i
];
1559 ops
[i
] = XEXP (ops
[i
], 0);
1560 negs
[i
] = ! negs
[i
];
1567 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0, changed
= 1;
1575 /* If we only have two operands, we can't do anything. */
1579 /* Now simplify each pair of operands until nothing changes. The first
1580 time through just simplify constants against each other. */
1587 for (i
= 0; i
< n_ops
- 1; i
++)
1588 for (j
= i
+ 1; j
< n_ops
; j
++)
1589 if (ops
[i
] != 0 && ops
[j
] != 0
1590 && (! first
|| (CONSTANT_P (ops
[i
]) && CONSTANT_P (ops
[j
]))))
1592 rtx lhs
= ops
[i
], rhs
= ops
[j
];
1593 enum rtx_code ncode
= PLUS
;
1595 if (negs
[i
] && ! negs
[j
])
1596 lhs
= ops
[j
], rhs
= ops
[i
], ncode
= MINUS
;
1597 else if (! negs
[i
] && negs
[j
])
1600 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
1603 ops
[i
] = tem
, ops
[j
] = 0;
1604 negs
[i
] = negs
[i
] && negs
[j
];
1605 if (GET_CODE (tem
) == NEG
)
1606 ops
[i
] = XEXP (tem
, 0), negs
[i
] = ! negs
[i
];
1608 if (GET_CODE (ops
[i
]) == CONST_INT
&& negs
[i
])
1609 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0;
1617 /* Pack all the operands to the lower-numbered entries and give up if
1618 we didn't reduce the number of operands we had. Make sure we
1619 count a CONST as two operands. If we have the same number of
1620 operands, but have made more CONSTs than we had, this is also
1621 an improvement, so accept it. */
1623 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1626 ops
[i
] = ops
[j
], negs
[i
++] = negs
[j
];
1627 if (GET_CODE (ops
[j
]) == CONST
)
1631 if (i
+ n_consts
> input_ops
1632 || (i
+ n_consts
== input_ops
&& n_consts
<= input_consts
))
1637 /* If we have a CONST_INT, put it last. */
1638 for (i
= 0; i
< n_ops
- 1; i
++)
1639 if (GET_CODE (ops
[i
]) == CONST_INT
)
1641 tem
= ops
[n_ops
- 1], ops
[n_ops
- 1] = ops
[i
] , ops
[i
] = tem
;
1642 j
= negs
[n_ops
- 1], negs
[n_ops
- 1] = negs
[i
], negs
[i
] = j
;
1645 /* Put a non-negated operand first. If there aren't any, make all
1646 operands positive and negate the whole thing later. */
1647 for (i
= 0; i
< n_ops
&& negs
[i
]; i
++)
1652 for (i
= 0; i
< n_ops
; i
++)
1658 tem
= ops
[0], ops
[0] = ops
[i
], ops
[i
] = tem
;
1659 j
= negs
[0], negs
[0] = negs
[i
], negs
[i
] = j
;
1662 /* Now make the result by performing the requested operations. */
1664 for (i
= 1; i
< n_ops
; i
++)
1665 result
= simplify_gen_binary (negs
[i
] ? MINUS
: PLUS
, mode
, result
, ops
[i
]);
1667 return negate
? gen_rtx_NEG (mode
, result
) : result
;
/* Fields of struct cfc_args: the argument record passed through
   do_float_handler to check_fold_consts below — two CONST_DOUBLE
   inputs and three comparison-result flags.
   NOTE(review): the "struct cfc_args {" header line was lost in this
   extraction; fields kept byte-identical.  */
1672 rtx op0
, op1
; /* Input */
1673 int equal
, op0lt
, op1lt
; /* Output */
/* check_fold_consts: callback run under do_float_handler (so FP traps
   are caught).  Converts the two CONST_DOUBLE operands in *args to
   REAL_VALUE_TYPE and fills in the equal/op0lt/op1lt result flags.
   NOTE(review): parameter declaration and braces were dropped by the
   extraction; code kept byte-identical.  */
1677 check_fold_consts (data
)
1680 struct cfc_args
*args
= (struct cfc_args
*) data
;
1681 REAL_VALUE_TYPE d0
, d1
;
1683 REAL_VALUE_FROM_CONST_DOUBLE (d0
, args
->op0
);
1684 REAL_VALUE_FROM_CONST_DOUBLE (d1
, args
->op1
);
1685 args
->equal
= REAL_VALUES_EQUAL (d0
, d1
);
1686 args
->op0lt
= REAL_VALUES_LESS (d0
, d1
);
1687 args
->op1lt
= REAL_VALUES_LESS (d1
, d0
);
1690 /* Like simplify_binary_operation except used for relational operators.
1691 MODE is the mode of the operands, not that of the result. If MODE
1692 is VOIDmode, both operands must also be VOIDmode and we compare the
1693 operands in "infinite precision".
1695 If no simplification is possible, this function returns zero. Otherwise,
1696 it returns either const_true_rtx or const0_rtx. */
/* simplify_relational_operation: try to fold a comparison CODE of OP0
   and OP1 (MODE is the operand mode, VOIDmode = infinite precision) to
   const_true_rtx or const0_rtx.  Strategy: canonicalize (constant
   second, strip COMPARE), try folding A-B against zero for signed
   integer comparisons, compute equal/op0lt/op0ltu/op1lt/op1ltu for
   constant operands (FP via check_fold_consts, integers via two-word
   arithmetic), then some code-specific nonzero-base tests, and finally
   dispatch on CODE.
   NOTE(review): many lines (case labels, braces, returns) were dropped
   by this extraction; code kept byte-identical.  */
1699 simplify_relational_operation (code
, mode
, op0
, op1
)
1701 enum machine_mode mode
;
1704 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
1707 if (mode
== VOIDmode
1708 && (GET_MODE (op0
) != VOIDmode
1709 || GET_MODE (op1
) != VOIDmode
))
1712 /* If op0 is a compare, extract the comparison arguments from it. */
1713 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
1714 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
1716 /* We can't simplify MODE_CC values since we don't know what the
1717 actual comparison is. */
1718 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
1725 /* Make sure the constant is second. */
1726 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
1727 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
1729 tem
= op0
, op0
= op1
, op1
= tem
;
1730 code
= swap_condition (code
);
1733 /* For integer comparisons of A and B maybe we can simplify A - B and can
1734 then simplify a comparison of that with zero. If A and B are both either
1735 a register or a CONST_INT, this can't help; testing for these cases will
1736 prevent infinite recursion here and speed things up.
1738 If CODE is an unsigned comparison, then we can never do this optimization,
1739 because it gives an incorrect result if the subtraction wraps around zero.
1740 ANSI C defines unsigned operations such that they never overflow, and
1741 thus such cases can not be ignored. */
1743 if (INTEGRAL_MODE_P (mode
) && op1
!= const0_rtx
1744 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (op0
) == CONST_INT
)
1745 && (GET_CODE (op1
) == REG
|| GET_CODE (op1
) == CONST_INT
))
1746 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
1747 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
1748 return simplify_relational_operation (signed_condition (code
),
1749 mode
, tem
, const0_rtx
);
1751 /* For non-IEEE floating-point, if the two operands are equal, we know the
1753 if (rtx_equal_p (op0
, op1
)
1754 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1755 || ! FLOAT_MODE_P (GET_MODE (op0
)) || flag_fast_math
))
1756 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
1758 /* If the operands are floating-point constants, see if we can fold
1760 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1761 else if (GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
1762 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_FLOAT
)
1764 struct cfc_args args
;
1766 /* Setup input for check_fold_consts() */
1770 if (do_float_handler(check_fold_consts
, (PTR
) &args
) == 0)
1771 /* We got an exception from check_fold_consts() */
1774 /* Receive output from check_fold_consts() */
1776 op0lt
= op0ltu
= args
.op0lt
;
1777 op1lt
= op1ltu
= args
.op1lt
;
1779 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1781 /* Otherwise, see if the operands are both integers. */
1782 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
1783 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
1784 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
1786 int width
= GET_MODE_BITSIZE (mode
);
1787 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
1788 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
1790 /* Get the two words comprising each integer constant. */
1791 if (GET_CODE (op0
) == CONST_DOUBLE
)
1793 l0u
= l0s
= CONST_DOUBLE_LOW (op0
);
1794 h0u
= h0s
= CONST_DOUBLE_HIGH (op0
);
1798 l0u
= l0s
= INTVAL (op0
);
1799 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
1802 if (GET_CODE (op1
) == CONST_DOUBLE
)
1804 l1u
= l1s
= CONST_DOUBLE_LOW (op1
);
1805 h1u
= h1s
= CONST_DOUBLE_HIGH (op1
);
1809 l1u
= l1s
= INTVAL (op1
);
1810 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
1813 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1814 we have to sign or zero-extend the values. */
1815 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
1816 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
1818 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
1820 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1821 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1823 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1824 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1826 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1827 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1830 equal
= (h0u
== h1u
&& l0u
== l1u
);
1831 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
1832 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
1833 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
1834 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
1837 /* Otherwise, there are some code-specific tests we can make. */
1843 /* References to the frame plus a constant or labels cannot
1844 be zero, but a SYMBOL_REF can due to #pragma weak. */
1845 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1846 || GET_CODE (op0
) == LABEL_REF
)
1847 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1848 /* On some machines, the ap reg can be 0 sometimes. */
1849 && op0
!= arg_pointer_rtx
1856 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1857 || GET_CODE (op0
) == LABEL_REF
)
1858 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1859 && op0
!= arg_pointer_rtx
1862 return const_true_rtx
;
1866 /* Unsigned values are never negative. */
1867 if (op1
== const0_rtx
)
1868 return const_true_rtx
;
1872 if (op1
== const0_rtx
)
1877 /* Unsigned values are never greater than the largest
1879 if (GET_CODE (op1
) == CONST_INT
1880 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1881 && INTEGRAL_MODE_P (mode
))
1882 return const_true_rtx
;
1886 if (GET_CODE (op1
) == CONST_INT
1887 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1888 && INTEGRAL_MODE_P (mode
))
1899 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1904 return equal
? const_true_rtx
: const0_rtx
;
1906 return ! equal
? const_true_rtx
: const0_rtx
;
1908 return op0lt
? const_true_rtx
: const0_rtx
;
1910 return op1lt
? const_true_rtx
: const0_rtx
;
1912 return op0ltu
? const_true_rtx
: const0_rtx
;
1914 return op1ltu
? const_true_rtx
: const0_rtx
;
1916 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
1918 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
1920 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
1922 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
1928 /* Simplify CODE, an operation with result mode MODE and three operands,
1929 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1930 a constant. Return 0 if no simplifications is possible. */
/* simplify_ternary_operation: fold a three-operand rtx (visible cases:
   SIGN_EXTRACT/ZERO_EXTRACT of a CONST_INT, and IF_THEN_ELSE with a
   constant or foldable comparison condition).  Returns the folded rtx,
   or (per the preceding comment) 0 if no simplification is possible.
   NOTE(review): case labels, braces and several returns were dropped by
   this extraction; code kept byte-identical.  */
1933 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
1935 enum machine_mode mode
, op0_mode
;
1938 unsigned int width
= GET_MODE_BITSIZE (mode
);
1940 /* VOIDmode means "infinite" precision. */
1942 width
= HOST_BITS_PER_WIDE_INT
;
1948 if (GET_CODE (op0
) == CONST_INT
1949 && GET_CODE (op1
) == CONST_INT
1950 && GET_CODE (op2
) == CONST_INT
1951 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
1952 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
1954 /* Extracting a bit-field from a constant */
1955 HOST_WIDE_INT val
= INTVAL (op0
);
1957 if (BITS_BIG_ENDIAN
)
1958 val
>>= (GET_MODE_BITSIZE (op0_mode
)
1959 - INTVAL (op2
) - INTVAL (op1
));
1961 val
>>= INTVAL (op2
);
1963 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
1965 /* First zero-extend. */
1966 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
1967 /* If desired, propagate sign bit. */
1968 if (code
== SIGN_EXTRACT
1969 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
1970 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
1973 /* Clear the bits that don't belong in our mode,
1974 unless they and our sign bit are all one.
1975 So we get either a reasonable negative value or a reasonable
1976 unsigned value for this mode. */
1977 if (width
< HOST_BITS_PER_WIDE_INT
1978 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
1979 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
1980 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1982 return GEN_INT (val
);
1987 if (GET_CODE (op0
) == CONST_INT
)
1988 return op0
!= const0_rtx
? op1
: op2
;
1990 /* Convert a == b ? b : a to "a". */
1991 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
1992 && (! FLOAT_MODE_P (mode
) || flag_fast_math
)
1993 && rtx_equal_p (XEXP (op0
, 0), op1
)
1994 && rtx_equal_p (XEXP (op0
, 1), op2
))
1996 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
1997 && (! FLOAT_MODE_P (mode
) || flag_fast_math
)
1998 && rtx_equal_p (XEXP (op0
, 1), op1
)
1999 && rtx_equal_p (XEXP (op0
, 0), op2
))
2001 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
2003 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
2004 ? GET_MODE (XEXP (op0
, 1))
2005 : GET_MODE (XEXP (op0
, 0)));
2007 = simplify_relational_operation (GET_CODE (op0
), cmp_mode
,
2008 XEXP (op0
, 0), XEXP (op0
, 1));
2010 /* See if any simplifications were possible. */
2011 if (temp
== const0_rtx
)
2013 else if (temp
== const1_rtx
)
2018 /* Look for happy constants in op1 and op2. */
2019 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
2021 HOST_WIDE_INT t
= INTVAL (op1
);
2022 HOST_WIDE_INT f
= INTVAL (op2
);
2024 if (t
== STORE_FLAG_VALUE
&& f
== 0)
2025 code
= GET_CODE (op0
);
2026 else if (t
== 0 && f
== STORE_FLAG_VALUE
2027 && can_reverse_comparison_p (op0
, NULL_RTX
))
2028 code
= reverse_condition (GET_CODE (op0
));
2032 return gen_rtx_fmt_ee (code
, mode
, XEXP (op0
, 0), XEXP (op0
, 1));
2044 /* Simplify X, an rtx expression.
2046 Return the simplified expression or NULL if no simplifications
2049 This is the preferred entry point into the simplification routines;
2050 however, we still allow passes to call the more specific routines.
2052 Right now GCC has three (yes, three) major bodies of RTL simplficiation
2053 code that need to be unified.
2055 1. fold_rtx in cse.c. This code uses various CSE specific
2056 information to aid in RTL simplification.
2058 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2059 it uses combine specific information to aid in RTL
2062 3. The routines in this file.
2065 Long term we want to only have one body of simplification code; to
2066 get to that state I recommend the following steps:
2068 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2069 which are not pass dependent state into these routines.
2071 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2072 use this routine whenever possible.
2074 3. Allow for pass dependent state to be provided to these
2075 routines and add simplifications based on the pass dependent
2076 state. Remove code from cse.c & combine.c that becomes
2079 It will take time, but ultimately the compiler will be easier to
2080 maintain and improve. It's totally silly that when we add a
2081 simplification that it needs to be added to 4 places (3 for RTL
2082 simplification and 1 for tree simplification. */
/* Body of simplify_rtx (the function-name line was lost in this
   extraction; identified from the preceding comment block): dispatches
   on GET_RTX_CLASS of X to the unary/binary/ternary/relational
   simplifiers.  Code kept byte-identical.  */
2089 enum machine_mode mode
;
2091 mode
= GET_MODE (x
);
2092 code
= GET_CODE (x
);
2094 switch (GET_RTX_CLASS (code
))
2097 return simplify_unary_operation (code
, mode
,
2098 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
2101 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
2105 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
2106 XEXP (x
, 0), XEXP (x
, 1), XEXP (x
, 2));
2109 return simplify_relational_operation (code
,
2110 (GET_MODE (XEXP (x
, 0)) != VOIDmode
2111 ? GET_MODE (XEXP (x
, 0))
2112 : GET_MODE (XEXP (x
, 1))),
2113 XEXP (x
, 0), XEXP (x
, 1));
2120 /* Allocate a struct elt_list and fill in its two elements with the
/* new_elt_list: allocate a struct elt_list, reusing the free list
   (empty_elt_lists) when possible, otherwise carving one from
   cselib_obstack.  NOTE(review): the second parameter declaration and
   the field-initialization/return lines were dropped by this
   extraction; code kept byte-identical.  */
2123 static struct elt_list
*
2124 new_elt_list (next
, elt
)
2125 struct elt_list
*next
;
2128 struct elt_list
*el
= empty_elt_lists
;
2131 empty_elt_lists
= el
->next
;
2133 el
= (struct elt_list
*) obstack_alloc (&cselib_obstack
,
2134 sizeof (struct elt_list
));
2140 /* Allocate a struct elt_loc_list and fill in its two elements with the
/* new_elt_loc_list: like new_elt_list but for struct elt_loc_list;
   reuses empty_elt_loc_lists or allocates from cselib_obstack, and
   records the current insn in setting_insn.  NOTE(review): several
   lines dropped by this extraction; code kept byte-identical.  */
2143 static struct elt_loc_list
*
2144 new_elt_loc_list (next
, loc
)
2145 struct elt_loc_list
*next
;
2148 struct elt_loc_list
*el
= empty_elt_loc_lists
;
2151 empty_elt_loc_lists
= el
->next
;
2153 el
= (struct elt_loc_list
*) obstack_alloc (&cselib_obstack
,
2154 sizeof (struct elt_loc_list
));
2157 el
->setting_insn
= cselib_current_insn
;
2161 /* The elt_list at *PL is no longer needed. Unchain it and free its
/* unchain_one_elt_list: unlink *PL from its list and push the node onto
   the empty_elt_lists free list for reuse.  NOTE(review): the line
   relinking *pl to l->next was dropped by this extraction; code kept
   byte-identical.  */
2165 unchain_one_elt_list (pl
)
2166 struct elt_list
**pl
;
2168 struct elt_list
*l
= *pl
;
2171 l
->next
= empty_elt_lists
;
2172 empty_elt_lists
= l
;
2175 /* Likewise for elt_loc_lists. */
/* unchain_one_elt_loc_list: same as unchain_one_elt_list, but for
   elt_loc_list nodes and the empty_elt_loc_lists free list.
   NOTE(review): gapped extraction; code kept byte-identical.  */
2178 unchain_one_elt_loc_list (pl
)
2179 struct elt_loc_list
**pl
;
2181 struct elt_loc_list
*l
= *pl
;
2184 l
->next
= empty_elt_loc_lists
;
2185 empty_elt_loc_lists
= l
;
2188 /* Likewise for cselib_vals. This also frees the addr_list associated with
/* unchain_one_value: free a cselib_val V — release its addr_list nodes
   one by one, then push V onto the empty_vals free list.
   NOTE(review): gapped extraction; code kept byte-identical.  */
2192 unchain_one_value (v
)
2195 while (v
->addr_list
)
2196 unchain_one_elt_list (&v
->addr_list
);
2198 v
->u
.next_free
= empty_vals
;
2202 /* Remove all entries from the hash table. Also used during
/* Body of the "remove all entries from the hash table" routine (the
   function header was lost in this extraction): clears per-register
   value lists, empties the hash table, releases the obstack back to
   cselib_startobj, and resets the free lists and counters.
   Code kept byte-identical.  */
2210 for (i
= 0; i
< cselib_nregs
; i
++)
2213 htab_empty (hash_table
);
2214 obstack_free (&cselib_obstack
, cselib_startobj
);
2217 empty_elt_lists
= 0;
2218 empty_elt_loc_lists
= 0;
2219 n_useless_values
= 0;
2221 next_unknown_value
= 0;
2224 /* The equality test for our hash table. The first argument ENTRY is a table
2225 element (i.e. a cselib_val), while the second arg X is an rtx. */
/* entry_and_rtx_equal_p: hash-table equality callback.  ENTRY is a
   cselib_val, X_ARG an rtx; returns nonzero iff some location recorded
   for the value is cselib-equal to the rtx.  NOTE(review): the return
   statements were dropped by this extraction; code kept
   byte-identical.  */
2228 entry_and_rtx_equal_p (entry
, x_arg
)
2229 const void *entry
, *x_arg
;
2231 struct elt_loc_list
*l
;
2232 const cselib_val
*v
= (const cselib_val
*) entry
;
2233 rtx x
= (rtx
) x_arg
;
2235 /* We don't guarantee that distinct rtx's have different hash values,
2236 so we need to do a comparison. */
2237 for (l
= v
->locs
; l
; l
= l
->next
)
2238 if (rtx_equal_for_cselib_p (l
->loc
, x
))
2244 /* The hash function for our hash table. The value is always computed with
2245 hash_rtx when adding an element; this function just extracts the hash
2246 value from a cselib_val structure. */
/* get_value_hash: hash-table hash callback — per the preceding comment,
   just extracts the stored hash value from the cselib_val ENTRY (the
   return line was dropped by this extraction).  Code kept
   byte-identical.  */
2249 get_value_hash (entry
)
2252 const cselib_val
*v
= (const cselib_val
*) entry
;
2256 /* Return true if X contains a VALUE rtx. If ONLY_USELESS is set, we
2257 only return true for values which point to a cselib_val whose value
2258 element has been set to zero, which implies the cselib_val will be
/* references_value_p: recursively scan X for VALUE rtxs; with
   ONLY_USELESS set, only VALUEs whose cselib_val has an empty locs list
   count (see the preceding comment).  NOTE(review): returns and braces
   were dropped by this extraction; code kept byte-identical.  */
2262 references_value_p (x
, only_useless
)
2266 enum rtx_code code
= GET_CODE (x
);
2267 const char *fmt
= GET_RTX_FORMAT (code
);
2270 if (GET_CODE (x
) == VALUE
2271 && (! only_useless
|| CSELIB_VAL_PTR (x
)->locs
== 0))
2274 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2276 if (fmt
[i
] == 'e' && references_value_p (XEXP (x
, i
), only_useless
))
2278 else if (fmt
[i
] == 'E')
2279 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2280 if (references_value_p (XVECEXP (x
, i
, j
), only_useless
))
2287 /* For all locations found in X, delete locations that reference useless
2288 values (i.e. values without any location). Called through
/* discard_useless_locs: htab_traverse callback — drop from a value's
   location list every location that references a useless value; if the
   value thereby loses all its locations, flag values_became_useless so
   remove_useless_values iterates again.  NOTE(review): loop header and
   else-branch lines were dropped by this extraction; code kept
   byte-identical.  */
2292 discard_useless_locs (x
, info
)
2294 void *info ATTRIBUTE_UNUSED
;
2296 cselib_val
*v
= (cselib_val
*)*x
;
2297 struct elt_loc_list
**p
= &v
->locs
;
2298 int had_locs
= v
->locs
!= 0;
2302 if (references_value_p ((*p
)->loc
, 1))
2303 unchain_one_elt_loc_list (p
);
2308 if (had_locs
&& v
->locs
== 0)
2311 values_became_useless
= 1;
2316 /* If X is a value with no locations, remove it from the hashtable. */
/* discard_useless_values: htab_traverse callback — per the preceding
   comment, removes a value with no locations from the hash table and
   frees it.  NOTE(review): the locs==0 guard line was dropped by this
   extraction; code kept byte-identical.  */
2319 discard_useless_values (x
, info
)
2321 void *info ATTRIBUTE_UNUSED
;
2323 cselib_val
*v
= (cselib_val
*)*x
;
2327 htab_clear_slot (hash_table
, x
);
2328 unchain_one_value (v
);
2335 /* Clean out useless values (i.e. those which no longer have locations
2336 associated with them) from the hash table. */
/* remove_useless_values: two-phase garbage collection over the hash
   table — iterate discard_useless_locs until a fixed point (dropping a
   location can make more values useless), then discard the values
   themselves.  NOTE(review): do/abort lines were dropped by this
   extraction; code kept byte-identical.  */
2339 remove_useless_values ()
2341 /* First pass: eliminate locations that reference the value. That in
2342 turn can make more values useless. */
2345 values_became_useless
= 0;
2346 htab_traverse (hash_table
, discard_useless_locs
, 0);
2348 while (values_became_useless
);
2350 /* Second pass: actually remove the values. */
2351 htab_traverse (hash_table
, discard_useless_values
, 0);
2353 if (n_useless_values
!= 0)
2357 /* Return nonzero if we can prove that X and Y contain the same value, taking
2358 our gathered information into account. */
/* rtx_equal_for_cselib_p: value-aware rtx equality — maps REG/MEM
   operands to their cselib values, unifies VALUE rtxs through their
   location lists (skipping REG/MEM locations to avoid infinite
   recursion), and otherwise compares structurally by rtx format.
   NOTE(review): returns, case labels and braces were dropped by this
   extraction; code kept byte-identical.  */
2361 rtx_equal_for_cselib_p (x
, y
)
2368 if (GET_CODE (x
) == REG
|| GET_CODE (x
) == MEM
)
2370 cselib_val
*e
= cselib_lookup (x
, VOIDmode
, 0);
2376 if (GET_CODE (y
) == REG
|| GET_CODE (y
) == MEM
)
2378 cselib_val
*e
= cselib_lookup (y
, VOIDmode
, 0);
2387 if (GET_CODE (x
) == VALUE
&& GET_CODE (y
) == VALUE
)
2388 return CSELIB_VAL_PTR (x
) == CSELIB_VAL_PTR (y
);
2390 if (GET_CODE (x
) == VALUE
)
2392 cselib_val
*e
= CSELIB_VAL_PTR (x
);
2393 struct elt_loc_list
*l
;
2395 for (l
= e
->locs
; l
; l
= l
->next
)
2399 /* Avoid infinite recursion. */
2400 if (GET_CODE (t
) == REG
|| GET_CODE (t
) == MEM
)
2402 else if (rtx_equal_for_cselib_p (t
, y
))
2409 if (GET_CODE (y
) == VALUE
)
2411 cselib_val
*e
= CSELIB_VAL_PTR (y
);
2412 struct elt_loc_list
*l
;
2414 for (l
= e
->locs
; l
; l
= l
->next
)
2418 if (GET_CODE (t
) == REG
|| GET_CODE (t
) == MEM
)
2420 else if (rtx_equal_for_cselib_p (x
, t
))
2427 if (GET_CODE (x
) != GET_CODE (y
) || GET_MODE (x
) != GET_MODE (y
))
2430 /* This won't be handled correctly by the code below. */
2431 if (GET_CODE (x
) == LABEL_REF
)
2432 return XEXP (x
, 0) == XEXP (y
, 0);
2434 code
= GET_CODE (x
);
2435 fmt
= GET_RTX_FORMAT (code
);
2437 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2444 if (XWINT (x
, i
) != XWINT (y
, i
))
2450 if (XINT (x
, i
) != XINT (y
, i
))
2456 /* Two vectors must have the same length. */
2457 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
2460 /* And the corresponding elements must match. */
2461 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2462 if (! rtx_equal_for_cselib_p (XVECEXP (x
, i
, j
),
2468 if (! rtx_equal_for_cselib_p (XEXP (x
, i
), XEXP (y
, i
)))
2474 if (strcmp (XSTR (x
, i
), XSTR (y
, i
)))
2479 /* These are just backpointers, so they don't matter. */
2486 /* It is believed that rtx's at this level will never
2487 contain anything but integers and other rtx's,
2488 except for within LABEL_REFs and SYMBOL_REFs. */
2496 /* Hash an rtx. Return 0 if we couldn't hash the rtx.
2497 For registers and memory locations, we look up their cselib_val structure
2498 and return its VALUE element.
2499 Possible reasons for return 0 are: the object is volatile, or we couldn't
2500 find a register or memory location in the table and CREATE is zero. If
2501 CREATE is nonzero, table elts are created for regs and mem.
2502 MODE is used in hashing for CONST_INTs only;
2503 otherwise the mode of X is used. */
/* hash_rtx: compute a hash for X (see the preceding comment for the
   full contract: returns 0 when the rtx can't be hashed — volatile, or
   an unknown REG/MEM with CREATE zero).  REG/MEM hash via their cselib
   value; constants hash by their bits; everything else recurses over
   the rtx format string, turning the last recursion into iteration.
   NOTE(review): case labels, returns and braces were dropped by this
   extraction; code kept byte-identical.  */
2506 hash_rtx (x
, mode
, create
)
2508 enum machine_mode mode
;
2515 unsigned int hash
= 0;
2517 /* repeat is used to turn tail-recursion into iteration. */
2519 code
= GET_CODE (x
);
2520 hash
+= (unsigned) code
+ (unsigned) GET_MODE (x
);
2526 e
= cselib_lookup (x
, GET_MODE (x
), create
);
2534 hash
+= ((unsigned) CONST_INT
<< 7) + (unsigned) mode
+ INTVAL (x
);
2535 return hash
? hash
: CONST_INT
;
2538 /* This is like the general case, except that it only counts
2539 the integers representing the constant. */
2540 hash
+= (unsigned) code
+ (unsigned) GET_MODE (x
);
2541 if (GET_MODE (x
) != VOIDmode
)
2542 for (i
= 2; i
< GET_RTX_LENGTH (CONST_DOUBLE
); i
++)
2543 hash
+= XWINT (x
, i
);
2545 hash
+= ((unsigned) CONST_DOUBLE_LOW (x
)
2546 + (unsigned) CONST_DOUBLE_HIGH (x
));
2547 return hash
? hash
: CONST_DOUBLE
;
2549 /* Assume there is only one rtx object for any given label. */
2552 += ((unsigned) LABEL_REF
<< 7) + (unsigned long) XEXP (x
, 0);
2553 return hash
? hash
: LABEL_REF
;
2557 += ((unsigned) SYMBOL_REF
<< 7) + (unsigned long) XSTR (x
, 0);
2558 return hash
? hash
: SYMBOL_REF
;
2569 case UNSPEC_VOLATILE
:
2573 if (MEM_VOLATILE_P (x
))
2582 i
= GET_RTX_LENGTH (code
) - 1;
2583 fmt
= GET_RTX_FORMAT (code
);
2588 rtx tem
= XEXP (x
, i
);
2589 unsigned int tem_hash
;
2591 /* If we are about to do the last recursive call
2592 needed at this level, change it into iteration.
2593 This function is called enough to be worth it. */
2600 tem_hash
= hash_rtx (tem
, 0, create
);
2606 else if (fmt
[i
] == 'E')
2607 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2609 unsigned int tem_hash
= hash_rtx (XVECEXP (x
, i
, j
), 0, create
);
2616 else if (fmt
[i
] == 's')
2618 const unsigned char *p
= (const unsigned char *) XSTR (x
, i
);
2624 else if (fmt
[i
] == 'i')
2625 hash
+= XINT (x
, i
);
2626 else if (fmt
[i
] == '0' || fmt
[i
] == 't')
2632 return hash
? hash
: 1 + GET_CODE (x
);
2635 /* Create a new value structure for VALUE and initialize it. The mode of the
/* new_cselib_val: allocate and initialize a cselib_val (reusing the
   empty_vals free list or the obstack), giving it a fresh VALUE rtx in
   MODE whose CSELIB_VAL_PTR points back at the structure.
   NOTE(review): several initialization lines were dropped by this
   extraction; code kept byte-identical.  */
2639 new_cselib_val (value
, mode
)
2641 enum machine_mode mode
;
2643 cselib_val
*e
= empty_vals
;
2646 empty_vals
= e
->u
.next_free
;
2648 e
= (cselib_val
*) obstack_alloc (&cselib_obstack
, sizeof (cselib_val
));
2654 e
->u
.val_rtx
= gen_rtx_VALUE (mode
);
2655 CSELIB_VAL_PTR (e
->u
.val_rtx
) = e
;
2661 /* ADDR_ELT is a value that is used as address. MEM_ELT is the value that
2662 contains the data at this address. X is a MEM that represents the
2663 value. Update the two value structures to represent this situation. */
/* add_mem_for_addr: record that MEM_ELT holds the data at the address
   described by ADDR_ELT (X is the MEM rtx).  Skips duplicates, builds a
   canonical MEM over the address's VALUE rtx, and cross-links the two
   values' lists.  NOTE(review): gapped extraction; code kept
   byte-identical.  */
2666 add_mem_for_addr (addr_elt
, mem_elt
, x
)
2667 cselib_val
*addr_elt
, *mem_elt
;
2671 struct elt_loc_list
*l
;
2673 /* Avoid duplicates. */
2674 for (l
= mem_elt
->locs
; l
; l
= l
->next
)
2675 if (GET_CODE (l
->loc
) == MEM
2676 && CSELIB_VAL_PTR (XEXP (l
->loc
, 0)) == addr_elt
)
2679 new = gen_rtx_MEM (GET_MODE (x
), addr_elt
->u
.val_rtx
);
2680 MEM_COPY_ATTRIBUTES (new, x
);
2682 addr_elt
->addr_list
= new_elt_list (addr_elt
->addr_list
, mem_elt
);
2683 mem_elt
->locs
= new_elt_loc_list (mem_elt
->locs
, new);
2686 /* Subroutine of cselib_lookup. Return a value for X, which is a MEM rtx.
2687 If CREATE, make a new one if we haven't seen it before. */
/* cselib_lookup_mem: return the cselib value for MEM rtx X.  Refuses
   volatile/BLKmode MEMs (and float MEMs under -ffloat-store), looks up
   the address value, searches its addr_list for an existing value of
   the right mode, and otherwise (if CREATE) builds a new one and
   inserts it into the hash table.  NOTE(review): returns and braces
   were dropped by this extraction; code kept byte-identical.  */
2690 cselib_lookup_mem (x
, create
)
2696 cselib_val
*mem_elt
;
2699 if (MEM_VOLATILE_P (x
) || GET_MODE (x
) == BLKmode
2700 || (FLOAT_MODE_P (GET_MODE (x
)) && flag_float_store
))
2703 /* Look up the value for the address. */
2704 addr
= cselib_lookup (XEXP (x
, 0), GET_MODE (x
), create
);
2708 /* Find a value that describes a value of our mode at that address. */
2709 for (l
= addr
->addr_list
; l
; l
= l
->next
)
2710 if (GET_MODE (l
->elt
->u
.val_rtx
) == GET_MODE (x
))
2716 mem_elt
= new_cselib_val (++next_unknown_value
, GET_MODE (x
));
2717 add_mem_for_addr (addr
, mem_elt
, x
);
2718 slot
= htab_find_slot_with_hash (hash_table
, x
, mem_elt
->value
, INSERT
);
2723 /* Walk rtx X and replace all occurrences of REG and MEM subexpressions
2724 with VALUE expressions. This way, it becomes independent of changes
2725 to registers and memory.
2726 X isn't actually modified; if modifications are needed, new rtl is
2727 allocated. However, the return value can share rtl with X. */
/* cselib_subst_to_values: rewrite X replacing REG and MEM
   subexpressions with their VALUE rtxs (see the preceding comment);
   shares rtl with X until a change forces a shallow copy, copying
   vectors lazily element by element.  NOTE(review): case labels, some
   returns and braces were dropped by this extraction; code kept
   byte-identical.  */
2730 cselib_subst_to_values (x
)
2733 enum rtx_code code
= GET_CODE (x
);
2734 const char *fmt
= GET_RTX_FORMAT (code
);
2743 for (l
= REG_VALUES (REGNO (x
)); l
; l
= l
->next
)
2744 if (GET_MODE (l
->elt
->u
.val_rtx
) == GET_MODE (x
))
2745 return l
->elt
->u
.val_rtx
;
2750 e
= cselib_lookup_mem (x
, 0);
2753 return e
->u
.val_rtx
;
2755 /* CONST_DOUBLEs must be special-cased here so that we won't try to
2756 look up the CONST_DOUBLE_MEM inside. */
2765 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2769 rtx t
= cselib_subst_to_values (XEXP (x
, i
));
2771 if (t
!= XEXP (x
, i
) && x
== copy
)
2772 copy
= shallow_copy_rtx (x
);
2776 else if (fmt
[i
] == 'E')
2780 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2782 rtx t
= cselib_subst_to_values (XVECEXP (x
, i
, j
));
2784 if (t
!= XVECEXP (x
, i
, j
) && XVEC (x
, i
) == XVEC (copy
, i
))
2787 copy
= shallow_copy_rtx (x
);
2789 XVEC (copy
, i
) = rtvec_alloc (XVECLEN (x
, i
));
2790 for (k
= 0; k
< j
; k
++)
2791 XVECEXP (copy
, i
, k
) = XVECEXP (x
, i
, k
);
2794 XVECEXP (copy
, i
, j
) = t
;
2802 /* Look up the rtl expression X in our tables and return the value it has.
2803 If CREATE is zero, we return NULL if we don't know the value. Otherwise,
2804 we create a new one if possible, using mode MODE if X doesn't have a mode
2805 (i.e. because it's a constant). */
2808 cselib_lookup (x
, mode
, create
)
2810 enum machine_mode mode
;
2815 unsigned int hashval
;
2817 if (GET_MODE (x
) != VOIDmode
)
2818 mode
= GET_MODE (x
);
2820 if (GET_CODE (x
) == VALUE
)
2821 return CSELIB_VAL_PTR (x
);
2823 if (GET_CODE (x
) == REG
)
2826 unsigned int i
= REGNO (x
);
2828 for (l
= REG_VALUES (i
); l
; l
= l
->next
)
2829 if (mode
== GET_MODE (l
->elt
->u
.val_rtx
))
2835 e
= new_cselib_val (++next_unknown_value
, GET_MODE (x
));
2836 e
->locs
= new_elt_loc_list (e
->locs
, x
);
2837 REG_VALUES (i
) = new_elt_list (REG_VALUES (i
), e
);
2838 slot
= htab_find_slot_with_hash (hash_table
, x
, e
->value
, INSERT
);
2843 if (GET_CODE (x
) == MEM
)
2844 return cselib_lookup_mem (x
, create
);
2846 hashval
= hash_rtx (x
, mode
, create
);
2847 /* Can't even create if hashing is not possible. */
2851 slot
= htab_find_slot_with_hash (hash_table
, x
, hashval
,
2852 create
? INSERT
: NO_INSERT
);
2856 e
= (cselib_val
*) *slot
;
2860 e
= new_cselib_val (hashval
, mode
);
2862 /* We have to fill the slot before calling cselib_subst_to_values:
2863 the hash table is inconsistent until we do so, and
2864 cselib_subst_to_values will need to do lookups. */
2866 e
->locs
= new_elt_loc_list (e
->locs
, cselib_subst_to_values (x
));
2870 /* Invalidate any entries in reg_values that overlap REGNO. This is called
2871 if REGNO is changing. MODE is the mode of the assignment to REGNO, which
2872 is used to determine how many hard registers are being changed. If MODE
2873 is VOIDmode, then only REGNO is being changed; this is used when
2874 invalidating call clobbered registers across a call. */
2877 cselib_invalidate_regno (regno
, mode
)
2879 enum machine_mode mode
;
2881 unsigned int endregno
;
2884 /* If we see pseudos after reload, something is _wrong_. */
2885 if (reload_completed
&& regno
>= FIRST_PSEUDO_REGISTER
2886 && reg_renumber
[regno
] >= 0)
2889 /* Determine the range of registers that must be invalidated. For
2890 pseudos, only REGNO is affected. For hard regs, we must take MODE
2891 into account, and we must also invalidate lower register numbers
2892 if they contain values that overlap REGNO. */
2893 endregno
= regno
+ 1;
2894 if (regno
< FIRST_PSEUDO_REGISTER
&& mode
!= VOIDmode
)
2895 endregno
= regno
+ HARD_REGNO_NREGS (regno
, mode
);
2897 for (i
= 0; i
< endregno
; i
++)
2899 struct elt_list
**l
= ®_VALUES (i
);
2901 /* Go through all known values for this reg; if it overlaps the range
2902 we're invalidating, remove the value. */
2905 cselib_val
*v
= (*l
)->elt
;
2906 struct elt_loc_list
**p
;
2907 unsigned int this_last
= i
;
2909 if (i
< FIRST_PSEUDO_REGISTER
)
2910 this_last
+= HARD_REGNO_NREGS (i
, GET_MODE (v
->u
.val_rtx
)) - 1;
2912 if (this_last
< regno
)
2918 /* We have an overlap. */
2919 unchain_one_elt_list (l
);
2921 /* Now, we clear the mapping from value to reg. It must exist, so
2922 this code will crash intentionally if it doesn't. */
2923 for (p
= &v
->locs
; ; p
= &(*p
)->next
)
2927 if (GET_CODE (x
) == REG
&& REGNO (x
) == i
)
2929 unchain_one_elt_loc_list (p
);
2939 /* The memory at address MEM_BASE is being changed.
2940 Return whether this change will invalidate VAL. */
2943 cselib_mem_conflict_p (mem_base
, val
)
2951 code
= GET_CODE (val
);
2954 /* Get rid of a few simple cases quickly. */
2967 if (GET_MODE (mem_base
) == BLKmode
2968 || GET_MODE (val
) == BLKmode
2969 || anti_dependence (val
, mem_base
))
2972 /* The address may contain nested MEMs. */
2979 fmt
= GET_RTX_FORMAT (code
);
2980 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2984 if (cselib_mem_conflict_p (mem_base
, XEXP (val
, i
)))
2987 else if (fmt
[i
] == 'E')
2988 for (j
= 0; j
< XVECLEN (val
, i
); j
++)
2989 if (cselib_mem_conflict_p (mem_base
, XVECEXP (val
, i
, j
)))
2996 /* For the value found in SLOT, walk its locations to determine if any overlap
2997 INFO (which is a MEM rtx). */
3000 cselib_invalidate_mem_1 (slot
, info
)
3004 cselib_val
*v
= (cselib_val
*) *slot
;
3005 rtx mem_rtx
= (rtx
) info
;
3006 struct elt_loc_list
**p
= &v
->locs
;
3007 int had_locs
= v
->locs
!= 0;
3013 struct elt_list
**mem_chain
;
3015 /* MEMs may occur in locations only at the top level; below
3016 that every MEM or REG is substituted by its VALUE. */
3017 if (GET_CODE (x
) != MEM
3018 || ! cselib_mem_conflict_p (mem_rtx
, x
))
3024 /* This one overlaps. */
3025 /* We must have a mapping from this MEM's address to the
3026 value (E). Remove that, too. */
3027 addr
= cselib_lookup (XEXP (x
, 0), VOIDmode
, 0);
3028 mem_chain
= &addr
->addr_list
;
3031 if ((*mem_chain
)->elt
== v
)
3033 unchain_one_elt_list (mem_chain
);
3037 mem_chain
= &(*mem_chain
)->next
;
3040 unchain_one_elt_loc_list (p
);
3043 if (had_locs
&& v
->locs
== 0)
3049 /* Invalidate any locations in the table which are changed because of a
3050 store to MEM_RTX. If this is called because of a non-const call
3051 instruction, MEM_RTX is (mem:BLK const0_rtx). */
3054 cselib_invalidate_mem (mem_rtx
)
3057 htab_traverse (hash_table
, cselib_invalidate_mem_1
, mem_rtx
);
3060 /* Invalidate DEST, which is being assigned to or clobbered. The second and
3061 the third parameter exist so that this function can be passed to
3062 note_stores; they are ignored. */
3065 cselib_invalidate_rtx (dest
, ignore
, data
)
3067 rtx ignore ATTRIBUTE_UNUSED
;
3068 void *data ATTRIBUTE_UNUSED
;
3070 while (GET_CODE (dest
) == STRICT_LOW_PART
|| GET_CODE (dest
) == SIGN_EXTRACT
3071 || GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SUBREG
)
3072 dest
= XEXP (dest
, 0);
3074 if (GET_CODE (dest
) == REG
)
3075 cselib_invalidate_regno (REGNO (dest
), GET_MODE (dest
));
3076 else if (GET_CODE (dest
) == MEM
)
3077 cselib_invalidate_mem (dest
);
3079 /* Some machines don't define AUTO_INC_DEC, but they still use push
3080 instructions. We need to catch that case here in order to
3081 invalidate the stack pointer correctly. Note that invalidating
3082 the stack pointer is different from invalidating DEST. */
3083 if (push_operand (dest
, GET_MODE (dest
)))
3084 cselib_invalidate_rtx (stack_pointer_rtx
, NULL_RTX
, NULL
);
3087 /* Record the result of a SET instruction. DEST is being set; the source
3088 contains the value described by SRC_ELT. If DEST is a MEM, DEST_ADDR_ELT
3089 describes its address. */
3092 cselib_record_set (dest
, src_elt
, dest_addr_elt
)
3094 cselib_val
*src_elt
, *dest_addr_elt
;
3096 int dreg
= GET_CODE (dest
) == REG
? (int) REGNO (dest
) : -1;
3098 if (src_elt
== 0 || side_effects_p (dest
))
3103 REG_VALUES (dreg
) = new_elt_list (REG_VALUES (dreg
), src_elt
);
3104 if (src_elt
->locs
== 0)
3106 src_elt
->locs
= new_elt_loc_list (src_elt
->locs
, dest
);
3108 else if (GET_CODE (dest
) == MEM
&& dest_addr_elt
!= 0)
3110 if (src_elt
->locs
== 0)
3112 add_mem_for_addr (dest_addr_elt
, src_elt
, dest
);
3116 /* Describe a single set that is part of an insn. */
3121 cselib_val
*src_elt
;
3122 cselib_val
*dest_addr_elt
;
3125 /* There is no good way to determine how many elements there can be
3126 in a PARALLEL. Since it's fairly cheap, use a really large number. */
3127 #define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
3129 /* Record the effects of any sets in INSN. */
3131 cselib_record_sets (insn
)
3136 struct set sets
[MAX_SETS
];
3137 rtx body
= PATTERN (insn
);
3139 body
= PATTERN (insn
);
3140 /* Find all sets. */
3141 if (GET_CODE (body
) == SET
)
3143 sets
[0].src
= SET_SRC (body
);
3144 sets
[0].dest
= SET_DEST (body
);
3147 else if (GET_CODE (body
) == PARALLEL
)
3149 /* Look through the PARALLEL and record the values being
3150 set, if possible. Also handle any CLOBBERs. */
3151 for (i
= XVECLEN (body
, 0) - 1; i
>= 0; --i
)
3153 rtx x
= XVECEXP (body
, 0, i
);
3155 if (GET_CODE (x
) == SET
)
3157 sets
[n_sets
].src
= SET_SRC (x
);
3158 sets
[n_sets
].dest
= SET_DEST (x
);
3164 /* Look up the values that are read. Do this before invalidating the
3165 locations that are written. */
3166 for (i
= 0; i
< n_sets
; i
++)
3168 sets
[i
].src_elt
= cselib_lookup (sets
[i
].src
, GET_MODE (sets
[i
].dest
),
3170 if (GET_CODE (sets
[i
].dest
) == MEM
)
3171 sets
[i
].dest_addr_elt
= cselib_lookup (XEXP (sets
[i
].dest
, 0), Pmode
,
3174 sets
[i
].dest_addr_elt
= 0;
3177 /* Invalidate all locations written by this insn. Note that the elts we
3178 looked up in the previous loop aren't affected, just some of their
3179 locations may go away. */
3180 note_stores (body
, cselib_invalidate_rtx
, NULL
);
3182 /* Now enter the equivalences in our tables. */
3183 for (i
= 0; i
< n_sets
; i
++)
3184 cselib_record_set (sets
[i
].dest
, sets
[i
].src_elt
, sets
[i
].dest_addr_elt
);
3187 /* Record the effects of INSN. */
3190 cselib_process_insn (insn
)
3196 cselib_current_insn
= insn
;
3198 /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp. */
3199 if (GET_CODE (insn
) == CODE_LABEL
3200 || (GET_CODE (insn
) == NOTE
3201 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_SETJMP
)
3202 || (GET_CODE (insn
) == INSN
3203 && GET_CODE (PATTERN (insn
)) == ASM_OPERANDS
3204 && MEM_VOLATILE_P (PATTERN (insn
))))
3210 if (! INSN_P (insn
))
3212 cselib_current_insn
= 0;
3216 /* If this is a call instruction, forget anything stored in a
3217 call clobbered register, or, if this is not a const call, in
3219 if (GET_CODE (insn
) == CALL_INSN
)
3221 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
3222 if (call_used_regs
[i
])
3223 cselib_invalidate_regno (i
, VOIDmode
);
3225 if (! CONST_CALL_P (insn
))
3226 cselib_invalidate_mem (callmem
);
3229 cselib_record_sets (insn
);
3232 /* Clobber any registers which appear in REG_INC notes. We
3233 could keep track of the changes to their values, but it is
3234 unlikely to help. */
3235 for (x
= REG_NOTES (insn
); x
; x
= XEXP (x
, 1))
3236 if (REG_NOTE_KIND (x
) == REG_INC
)
3237 cselib_invalidate_rtx (XEXP (x
, 0), NULL_RTX
, NULL
);
3240 /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
3241 after we have processed the insn. */
3242 if (GET_CODE (insn
) == CALL_INSN
)
3243 for (x
= CALL_INSN_FUNCTION_USAGE (insn
); x
; x
= XEXP (x
, 1))
3244 if (GET_CODE (XEXP (x
, 0)) == CLOBBER
)
3245 cselib_invalidate_rtx (XEXP (XEXP (x
, 0), 0), NULL_RTX
, NULL
);
3247 cselib_current_insn
= 0;
3249 if (n_useless_values
> MAX_USELESS_VALUES
)
3250 remove_useless_values ();
3253 /* Make sure our varrays are big enough. Not called from any cselib routines;
3254 it must be called by the user if it allocated new registers. */
3257 cselib_update_varray_sizes ()
3259 unsigned int nregs
= max_reg_num ();
3261 if (nregs
== cselib_nregs
)
3264 cselib_nregs
= nregs
;
3265 VARRAY_GROW (reg_values
, nregs
);
3268 /* Initialize cselib for one pass. The caller must also call
3269 init_alias_analysis. */
3274 /* These are only created once. */
3277 extern struct obstack permanent_obstack
;
3279 gcc_obstack_init (&cselib_obstack
);
3280 cselib_startobj
= obstack_alloc (&cselib_obstack
, 0);
3282 push_obstacks (&permanent_obstack
, &permanent_obstack
);
3283 callmem
= gen_rtx_MEM (BLKmode
, const0_rtx
);
3285 ggc_add_rtx_root (&callmem
, 1);
3288 cselib_nregs
= max_reg_num ();
3289 VARRAY_ELT_LIST_INIT (reg_values
, cselib_nregs
, "reg_values");
3290 hash_table
= htab_create (31, get_value_hash
, entry_and_rtx_equal_p
, NULL
);
3294 /* Called when the current user is done with cselib. */
3300 htab_delete (hash_table
);