1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000 Free Software Foundation, Inc.
5 This file is part of GNU CC.
7 GNU CC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 GNU CC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GNU CC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
30 #include "hard-reg-set.h"
33 #include "insn-config.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Nonzero if X has the form (PLUS frame-pointer integer). We check for
47 virtual regs here because the simplify_*_operation routines are called
48 by integrate.c, which is called before virtual register instantiation.
50 ?!? FIXED_BASE_PLUS_P and NONZERO_BASE_PLUS_P need to move into
51 a header file so that their definitions can be shared with the
52 simplification routines in simplify-rtx.c. Until then, do not
53 change these macros without also changing the copy in simplify-rtx.c. */
/* Fix: in the PLUS arm below, the arg-pointer test formerly compared the
   whole expression ((X) == arg_pointer_rtx), which can never match once we
   know X is a PLUS; it must inspect the base operand XEXP (X, 0), exactly
   like the neighboring frame/virtual register tests.  */
#define FIXED_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || ((X) == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
69 /* Similar, but also allows reference to the stack pointer.
71 This used to include FIXED_BASE_PLUS_P, however, we can't assume that
72 arg_pointer_rtx by itself is nonzero, because on at least one machine,
73 the i960, the arg pointer is zero when it is unused. */
/* Fix: as in FIXED_BASE_PLUS_P, the arg-pointer test in the first PLUS arm
   formerly compared the whole PLUS ((X) == arg_pointer_rtx) — a condition
   that can never hold there; it must look at the base operand XEXP (X, 0).  */
#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
95 /* Much code operates on (low, high) pairs; the low value is an
96 unsigned wide int, the high value a signed wide int. We
97 occasionally need to sign extend from low to high as if low were a
/* Sign-extend LOW (the low half of a (low, high) double-word constant)
   into the high half: all-ones when the sign bit of LOW is set, zero
   otherwise.  The argument is parenthesized so that compound expressions
   (e.g. HWI_SIGN_EXTEND (a - b)) expand safely.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
102 static rtx simplify_plus_minus
PARAMS ((enum rtx_code
,
103 enum machine_mode
, rtx
, rtx
));
104 static void check_fold_consts
PARAMS ((PTR
));
105 static int entry_and_rtx_equal_p
PARAMS ((const void *, const void *));
106 static unsigned int get_value_hash
PARAMS ((const void *));
107 static struct elt_list
*new_elt_list
PARAMS ((struct elt_list
*,
109 static struct elt_loc_list
*new_elt_loc_list
PARAMS ((struct elt_loc_list
*,
111 static void unchain_one_value
PARAMS ((cselib_val
*));
112 static void unchain_one_elt_list
PARAMS ((struct elt_list
**));
113 static void unchain_one_elt_loc_list
PARAMS ((struct elt_loc_list
**));
114 static void clear_table
PARAMS ((void));
115 static int discard_useless_locs
PARAMS ((void **, void *));
116 static int discard_useless_values
PARAMS ((void **, void *));
117 static void remove_useless_values
PARAMS ((void));
118 static unsigned int hash_rtx
PARAMS ((rtx
, enum machine_mode
, int));
119 static cselib_val
*new_cselib_val
PARAMS ((unsigned int,
121 static void add_mem_for_addr
PARAMS ((cselib_val
*, cselib_val
*,
123 static cselib_val
*cselib_lookup_mem
PARAMS ((rtx
, int));
124 static rtx cselib_subst_to_values
PARAMS ((rtx
));
125 static void cselib_invalidate_regno
PARAMS ((unsigned int,
127 static int cselib_mem_conflict_p
PARAMS ((rtx
, rtx
));
128 static int cselib_invalidate_mem_1
PARAMS ((void **, void *));
129 static void cselib_invalidate_mem
PARAMS ((rtx
));
130 static void cselib_invalidate_rtx
PARAMS ((rtx
, rtx
, void *));
131 static void cselib_record_set
PARAMS ((rtx
, cselib_val
*,
133 static void cselib_record_sets
PARAMS ((rtx
));
135 /* There are three ways in which cselib can look up an rtx:
136 - for a REG, the reg_values table (which is indexed by regno) is used
137 - for a MEM, we recursively look up its address and then follow the
138 addr_list of that value
139 - for everything else, we compute a hash value and go through the hash
140 table. Since different rtx's can still have the same hash value,
141 this involves walking the table entries for a given value and comparing
142 the locations of the entries with the rtx we are looking up. */
144 /* A table that enables us to look up elts by their value. */
145 static htab_t hash_table
;
147 /* This is a global so we don't have to pass this through every function.
148 It is used in new_elt_loc_list to set SETTING_INSN. */
149 static rtx cselib_current_insn
;
151 /* Every new unknown value gets a unique number. */
152 static unsigned int next_unknown_value
;
154 /* The number of registers we had when the varrays were last resized. */
155 static unsigned int cselib_nregs
;
157 /* Count values without known locations. Whenever this grows too big, we
158 remove these useless values from the table. */
159 static int n_useless_values
;
161 /* Number of useless values before we remove them from the hash table. */
162 #define MAX_USELESS_VALUES 32
164 /* This table maps from register number to values. It does not contain
165 pointers to cselib_val structures, but rather elt_lists. The purpose is
166 to be able to refer to the same register in different modes. */
167 static varray_type reg_values
;
168 #define REG_VALUES(I) VARRAY_ELT_LIST (reg_values, (I))
170 /* We pass this to cselib_invalidate_mem to invalidate all of
171 memory for a non-const call instruction. */
174 /* Memory for our structures is allocated from this obstack. */
175 static struct obstack cselib_obstack
;
177 /* Used to quickly free all memory. */
178 static char *cselib_startobj
;
180 /* Caches for unused structures. */
181 static cselib_val
*empty_vals
;
182 static struct elt_list
*empty_elt_lists
;
183 static struct elt_loc_list
*empty_elt_loc_lists
;
185 /* Set by discard_useless_locs if it deleted the last location of any
187 static int values_became_useless
;
189 /* Make a binary operation by properly ordering the operands and
190 seeing if the expression folds. */
193 simplify_gen_binary (code
, mode
, op0
, op1
)
195 enum machine_mode mode
;
200 /* Put complex operands first and constants second if commutative. */
201 if (GET_RTX_CLASS (code
) == 'c'
202 && ((CONSTANT_P (op0
) && GET_CODE (op1
) != CONST_INT
)
203 || (GET_RTX_CLASS (GET_CODE (op0
)) == 'o'
204 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')
205 || (GET_CODE (op0
) == SUBREG
206 && GET_RTX_CLASS (GET_CODE (SUBREG_REG (op0
))) == 'o'
207 && GET_RTX_CLASS (GET_CODE (op1
)) != 'o')))
208 tem
= op0
, op0
= op1
, op1
= tem
;
210 /* If this simplifies, do it. */
211 tem
= simplify_binary_operation (code
, mode
, op0
, op1
);
216 /* Handle addition and subtraction of CONST_INT specially. Otherwise,
217 just form the operation. */
219 if (code
== PLUS
&& GET_CODE (op1
) == CONST_INT
220 && GET_MODE (op0
) != VOIDmode
)
221 return plus_constant (op0
, INTVAL (op1
));
222 else if (code
== MINUS
&& GET_CODE (op1
) == CONST_INT
223 && GET_MODE (op0
) != VOIDmode
)
224 return plus_constant (op0
, - INTVAL (op1
));
226 return gen_rtx_fmt_ee (code
, mode
, op0
, op1
);
229 /* Try to simplify a unary operation CODE whose output mode is to be
230 MODE with input operand OP whose mode was originally OP_MODE.
231 Return zero if no simplification can be made. */
234 simplify_unary_operation (code
, mode
, op
, op_mode
)
236 enum machine_mode mode
;
238 enum machine_mode op_mode
;
240 unsigned int width
= GET_MODE_BITSIZE (mode
);
242 /* The order of these tests is critical so that, for example, we don't
243 check the wrong mode (input vs. output) for a conversion operation,
244 such as FIX. At some point, this should be simplified. */
246 #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC)
248 if (code
== FLOAT
&& GET_MODE (op
) == VOIDmode
249 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
251 HOST_WIDE_INT hv
, lv
;
254 if (GET_CODE (op
) == CONST_INT
)
255 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
257 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
259 #ifdef REAL_ARITHMETIC
260 REAL_VALUE_FROM_INT (d
, lv
, hv
, mode
);
265 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
266 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
267 d
+= (double) (unsigned HOST_WIDE_INT
) (~ lv
);
273 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
274 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
275 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
277 #endif /* REAL_ARITHMETIC */
278 d
= real_value_truncate (mode
, d
);
279 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
281 else if (code
== UNSIGNED_FLOAT
&& GET_MODE (op
) == VOIDmode
282 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
284 HOST_WIDE_INT hv
, lv
;
287 if (GET_CODE (op
) == CONST_INT
)
288 lv
= INTVAL (op
), hv
= HWI_SIGN_EXTEND (lv
);
290 lv
= CONST_DOUBLE_LOW (op
), hv
= CONST_DOUBLE_HIGH (op
);
292 if (op_mode
== VOIDmode
)
294 /* We don't know how to interpret negative-looking numbers in
295 this case, so don't try to fold those. */
299 else if (GET_MODE_BITSIZE (op_mode
) >= HOST_BITS_PER_WIDE_INT
* 2)
302 hv
= 0, lv
&= GET_MODE_MASK (op_mode
);
304 #ifdef REAL_ARITHMETIC
305 REAL_VALUE_FROM_UNSIGNED_INT (d
, lv
, hv
, mode
);
308 d
= (double) (unsigned HOST_WIDE_INT
) hv
;
309 d
*= ((double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2))
310 * (double) ((HOST_WIDE_INT
) 1 << (HOST_BITS_PER_WIDE_INT
/ 2)));
311 d
+= (double) (unsigned HOST_WIDE_INT
) lv
;
312 #endif /* REAL_ARITHMETIC */
313 d
= real_value_truncate (mode
, d
);
314 return CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
318 if (GET_CODE (op
) == CONST_INT
319 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
321 register HOST_WIDE_INT arg0
= INTVAL (op
);
322 register HOST_WIDE_INT val
;
335 val
= (arg0
>= 0 ? arg0
: - arg0
);
339 /* Don't use ffs here. Instead, get low order bit and then its
340 number. If arg0 is zero, this will return 0, as desired. */
341 arg0
&= GET_MODE_MASK (mode
);
342 val
= exact_log2 (arg0
& (- arg0
)) + 1;
350 if (op_mode
== VOIDmode
)
352 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
354 /* If we were really extending the mode,
355 we would have to distinguish between zero-extension
356 and sign-extension. */
357 if (width
!= GET_MODE_BITSIZE (op_mode
))
361 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
362 val
= arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
368 if (op_mode
== VOIDmode
)
370 if (GET_MODE_BITSIZE (op_mode
) == HOST_BITS_PER_WIDE_INT
)
372 /* If we were really extending the mode,
373 we would have to distinguish between zero-extension
374 and sign-extension. */
375 if (width
!= GET_MODE_BITSIZE (op_mode
))
379 else if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
)
382 = arg0
& ~((HOST_WIDE_INT
) (-1) << GET_MODE_BITSIZE (op_mode
));
384 & ((HOST_WIDE_INT
) 1 << (GET_MODE_BITSIZE (op_mode
) - 1)))
385 val
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
400 val
= trunc_int_for_mode (val
, mode
);
402 return GEN_INT (val
);
405 /* We can do some operations on integer CONST_DOUBLEs. Also allow
406 for a DImode operation on a CONST_INT. */
407 else if (GET_MODE (op
) == VOIDmode
&& width
<= HOST_BITS_PER_INT
* 2
408 && (GET_CODE (op
) == CONST_DOUBLE
|| GET_CODE (op
) == CONST_INT
))
410 unsigned HOST_WIDE_INT l1
, lv
;
411 HOST_WIDE_INT h1
, hv
;
413 if (GET_CODE (op
) == CONST_DOUBLE
)
414 l1
= CONST_DOUBLE_LOW (op
), h1
= CONST_DOUBLE_HIGH (op
);
416 l1
= INTVAL (op
), h1
= HWI_SIGN_EXTEND (l1
);
426 neg_double (l1
, h1
, &lv
, &hv
);
431 neg_double (l1
, h1
, &lv
, &hv
);
439 lv
= HOST_BITS_PER_WIDE_INT
+ exact_log2 (h1
& (-h1
)) + 1;
441 lv
= exact_log2 (l1
& (-l1
)) + 1;
445 /* This is just a change-of-mode, so do nothing. */
450 if (op_mode
== VOIDmode
451 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
455 lv
= l1
& GET_MODE_MASK (op_mode
);
459 if (op_mode
== VOIDmode
460 || GET_MODE_BITSIZE (op_mode
) > HOST_BITS_PER_WIDE_INT
)
464 lv
= l1
& GET_MODE_MASK (op_mode
);
465 if (GET_MODE_BITSIZE (op_mode
) < HOST_BITS_PER_WIDE_INT
466 && (lv
& ((HOST_WIDE_INT
) 1
467 << (GET_MODE_BITSIZE (op_mode
) - 1))) != 0)
468 lv
-= (HOST_WIDE_INT
) 1 << GET_MODE_BITSIZE (op_mode
);
470 hv
= HWI_SIGN_EXTEND (lv
);
481 return immed_double_const (lv
, hv
, mode
);
484 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
485 else if (GET_CODE (op
) == CONST_DOUBLE
486 && GET_MODE_CLASS (mode
) == MODE_FLOAT
)
492 if (setjmp (handler
))
493 /* There used to be a warning here, but that is inadvisable.
494 People may want to cause traps, and the natural way
495 to do it should not get a warning. */
498 set_float_handler (handler
);
500 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
505 d
= REAL_VALUE_NEGATE (d
);
509 if (REAL_VALUE_NEGATIVE (d
))
510 d
= REAL_VALUE_NEGATE (d
);
514 d
= real_value_truncate (mode
, d
);
518 /* All this does is change the mode. */
522 d
= REAL_VALUE_RNDZINT (d
);
526 d
= REAL_VALUE_UNSIGNED_RNDZINT (d
);
536 x
= CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
);
537 set_float_handler (NULL_PTR
);
541 else if (GET_CODE (op
) == CONST_DOUBLE
542 && GET_MODE_CLASS (GET_MODE (op
)) == MODE_FLOAT
543 && GET_MODE_CLASS (mode
) == MODE_INT
544 && width
<= HOST_BITS_PER_WIDE_INT
&& width
> 0)
550 if (setjmp (handler
))
553 set_float_handler (handler
);
555 REAL_VALUE_FROM_CONST_DOUBLE (d
, op
);
560 val
= REAL_VALUE_FIX (d
);
564 val
= REAL_VALUE_UNSIGNED_FIX (d
);
571 set_float_handler (NULL_PTR
);
573 val
= trunc_int_for_mode (val
, mode
);
575 return GEN_INT (val
);
578 /* This was formerly used only for non-IEEE float.
579 eggert@twinsun.com says it is safe for IEEE also. */
582 /* There are some simplifications we can do even if the operands
587 /* (not (not X)) == X. */
588 if (GET_CODE (op
) == NOT
)
591 /* (not (eq X Y)) == (ne X Y), etc. */
592 if (mode
== BImode
&& GET_RTX_CLASS (GET_CODE (op
)) == '<'
593 && can_reverse_comparison_p (op
, NULL_RTX
))
594 return gen_rtx_fmt_ee (reverse_condition (GET_CODE (op
)),
595 op_mode
, XEXP (op
, 0), XEXP (op
, 1));
599 /* (neg (neg X)) == X. */
600 if (GET_CODE (op
) == NEG
)
605 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
606 becomes just the MINUS if its mode is MODE. This allows
607 folding switch statements on machines using casesi (such as
609 if (GET_CODE (op
) == TRUNCATE
610 && GET_MODE (XEXP (op
, 0)) == mode
611 && GET_CODE (XEXP (op
, 0)) == MINUS
612 && GET_CODE (XEXP (XEXP (op
, 0), 0)) == LABEL_REF
613 && GET_CODE (XEXP (XEXP (op
, 0), 1)) == LABEL_REF
)
616 #ifdef POINTERS_EXTEND_UNSIGNED
617 if (! POINTERS_EXTEND_UNSIGNED
618 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
620 return convert_memory_address (Pmode
, op
);
624 #ifdef POINTERS_EXTEND_UNSIGNED
626 if (POINTERS_EXTEND_UNSIGNED
627 && mode
== Pmode
&& GET_MODE (op
) == ptr_mode
629 return convert_memory_address (Pmode
, op
);
641 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
642 and OP1. Return 0 if no simplification is possible.
644 Don't use this for relational operations such as EQ or LT.
645 Use simplify_relational_operation instead. */
648 simplify_binary_operation (code
, mode
, op0
, op1
)
650 enum machine_mode mode
;
653 register HOST_WIDE_INT arg0
, arg1
, arg0s
, arg1s
;
655 unsigned int width
= GET_MODE_BITSIZE (mode
);
658 /* Relational operations don't work here. We must know the mode
659 of the operands in order to do the comparison correctly.
660 Assuming a full word can give incorrect results.
661 Consider comparing 128 with -128 in QImode. */
663 if (GET_RTX_CLASS (code
) == '<')
666 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
667 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
668 && GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
669 && mode
== GET_MODE (op0
) && mode
== GET_MODE (op1
))
671 REAL_VALUE_TYPE f0
, f1
, value
;
674 if (setjmp (handler
))
677 set_float_handler (handler
);
679 REAL_VALUE_FROM_CONST_DOUBLE (f0
, op0
);
680 REAL_VALUE_FROM_CONST_DOUBLE (f1
, op1
);
681 f0
= real_value_truncate (mode
, f0
);
682 f1
= real_value_truncate (mode
, f1
);
684 #ifdef REAL_ARITHMETIC
685 #ifndef REAL_INFINITY
686 if (code
== DIV
&& REAL_VALUES_EQUAL (f1
, dconst0
))
689 REAL_ARITHMETIC (value
, rtx_to_tree_code (code
), f0
, f1
);
703 #ifndef REAL_INFINITY
710 value
= MIN (f0
, f1
);
713 value
= MAX (f0
, f1
);
720 value
= real_value_truncate (mode
, value
);
721 set_float_handler (NULL_PTR
);
722 return CONST_DOUBLE_FROM_REAL_VALUE (value
, mode
);
724 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
726 /* We can fold some multi-word operations. */
727 if (GET_MODE_CLASS (mode
) == MODE_INT
728 && width
== HOST_BITS_PER_WIDE_INT
* 2
729 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
730 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
732 unsigned HOST_WIDE_INT l1
, l2
, lv
;
733 HOST_WIDE_INT h1
, h2
, hv
;
735 if (GET_CODE (op0
) == CONST_DOUBLE
)
736 l1
= CONST_DOUBLE_LOW (op0
), h1
= CONST_DOUBLE_HIGH (op0
);
738 l1
= INTVAL (op0
), h1
= HWI_SIGN_EXTEND (l1
);
740 if (GET_CODE (op1
) == CONST_DOUBLE
)
741 l2
= CONST_DOUBLE_LOW (op1
), h2
= CONST_DOUBLE_HIGH (op1
);
743 l2
= INTVAL (op1
), h2
= HWI_SIGN_EXTEND (l2
);
748 /* A - B == A + (-B). */
749 neg_double (l2
, h2
, &lv
, &hv
);
752 /* .. fall through ... */
755 add_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
759 mul_double (l1
, h1
, l2
, h2
, &lv
, &hv
);
762 case DIV
: case MOD
: case UDIV
: case UMOD
:
763 /* We'd need to include tree.h to do this and it doesn't seem worth
768 lv
= l1
& l2
, hv
= h1
& h2
;
772 lv
= l1
| l2
, hv
= h1
| h2
;
776 lv
= l1
^ l2
, hv
= h1
^ h2
;
782 && ((unsigned HOST_WIDE_INT
) l1
783 < (unsigned HOST_WIDE_INT
) l2
)))
792 && ((unsigned HOST_WIDE_INT
) l1
793 > (unsigned HOST_WIDE_INT
) l2
)))
800 if ((unsigned HOST_WIDE_INT
) h1
< (unsigned HOST_WIDE_INT
) h2
802 && ((unsigned HOST_WIDE_INT
) l1
803 < (unsigned HOST_WIDE_INT
) l2
)))
810 if ((unsigned HOST_WIDE_INT
) h1
> (unsigned HOST_WIDE_INT
) h2
812 && ((unsigned HOST_WIDE_INT
) l1
813 > (unsigned HOST_WIDE_INT
) l2
)))
819 case LSHIFTRT
: case ASHIFTRT
:
821 case ROTATE
: case ROTATERT
:
822 #ifdef SHIFT_COUNT_TRUNCATED
823 if (SHIFT_COUNT_TRUNCATED
)
824 l2
&= (GET_MODE_BITSIZE (mode
) - 1), h2
= 0;
827 if (h2
!= 0 || l2
>= GET_MODE_BITSIZE (mode
))
830 if (code
== LSHIFTRT
|| code
== ASHIFTRT
)
831 rshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
,
833 else if (code
== ASHIFT
)
834 lshift_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
, 1);
835 else if (code
== ROTATE
)
836 lrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
837 else /* code == ROTATERT */
838 rrotate_double (l1
, h1
, l2
, GET_MODE_BITSIZE (mode
), &lv
, &hv
);
845 return immed_double_const (lv
, hv
, mode
);
848 if (GET_CODE (op0
) != CONST_INT
|| GET_CODE (op1
) != CONST_INT
849 || width
> HOST_BITS_PER_WIDE_INT
|| width
== 0)
851 /* Even if we can't compute a constant result,
852 there are some cases worth simplifying. */
857 /* In IEEE floating point, x+0 is not the same as x. Similarly
858 for the other optimizations below. */
859 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
860 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
863 if (op1
== CONST0_RTX (mode
))
866 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)) */
867 if (GET_CODE (op0
) == NEG
)
868 return simplify_gen_binary (MINUS
, mode
, op1
, XEXP (op0
, 0));
869 else if (GET_CODE (op1
) == NEG
)
870 return simplify_gen_binary (MINUS
, mode
, op0
, XEXP (op1
, 0));
872 /* Handle both-operands-constant cases. We can only add
873 CONST_INTs to constants since the sum of relocatable symbols
874 can't be handled by most assemblers. Don't add CONST_INT
875 to CONST_INT since overflow won't be computed properly if wider
876 than HOST_BITS_PER_WIDE_INT. */
878 if (CONSTANT_P (op0
) && GET_MODE (op0
) != VOIDmode
879 && GET_CODE (op1
) == CONST_INT
)
880 return plus_constant (op0
, INTVAL (op1
));
881 else if (CONSTANT_P (op1
) && GET_MODE (op1
) != VOIDmode
882 && GET_CODE (op0
) == CONST_INT
)
883 return plus_constant (op1
, INTVAL (op0
));
885 /* See if this is something like X * C - X or vice versa or
886 if the multiplication is written as a shift. If so, we can
887 distribute and make a new multiply, shift, or maybe just
888 have X (if C is 2 in the example above). But don't make
889 real multiply if we didn't have one before. */
891 if (! FLOAT_MODE_P (mode
))
893 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
894 rtx lhs
= op0
, rhs
= op1
;
897 if (GET_CODE (lhs
) == NEG
)
898 coeff0
= -1, lhs
= XEXP (lhs
, 0);
899 else if (GET_CODE (lhs
) == MULT
900 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
902 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
905 else if (GET_CODE (lhs
) == ASHIFT
906 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
907 && INTVAL (XEXP (lhs
, 1)) >= 0
908 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
910 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
914 if (GET_CODE (rhs
) == NEG
)
915 coeff1
= -1, rhs
= XEXP (rhs
, 0);
916 else if (GET_CODE (rhs
) == MULT
917 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
919 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
922 else if (GET_CODE (rhs
) == ASHIFT
923 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
924 && INTVAL (XEXP (rhs
, 1)) >= 0
925 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
927 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
931 if (rtx_equal_p (lhs
, rhs
))
933 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
934 GEN_INT (coeff0
+ coeff1
));
935 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
939 /* If one of the operands is a PLUS or a MINUS, see if we can
940 simplify this by the associative law.
941 Don't use the associative law for floating point.
942 The inaccuracy makes it nonassociative,
943 and subtle programs can break if operations are associated. */
945 if (INTEGRAL_MODE_P (mode
)
946 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
947 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
948 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
954 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
955 using cc0, in which case we want to leave it as a COMPARE
956 so we can distinguish it from a register-register-copy.
958 In IEEE floating point, x-0 is not the same as x. */
960 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
961 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
962 && op1
== CONST0_RTX (mode
))
966 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
967 if (((GET_CODE (op0
) == GT
&& GET_CODE (op1
) == LT
)
968 || (GET_CODE (op0
) == GTU
&& GET_CODE (op1
) == LTU
))
969 && XEXP (op0
, 1) == const0_rtx
&& XEXP (op1
, 1) == const0_rtx
)
971 rtx xop00
= XEXP (op0
, 0);
972 rtx xop10
= XEXP (op1
, 0);
975 if (GET_CODE (xop00
) == CC0
&& GET_CODE (xop10
) == CC0
)
977 if (GET_CODE (xop00
) == REG
&& GET_CODE (xop10
) == REG
978 && GET_MODE (xop00
) == GET_MODE (xop10
)
979 && REGNO (xop00
) == REGNO (xop10
)
980 && GET_MODE_CLASS (GET_MODE (xop00
)) == MODE_CC
981 && GET_MODE_CLASS (GET_MODE (xop10
)) == MODE_CC
)
988 /* None of these optimizations can be done for IEEE
990 if (TARGET_FLOAT_FORMAT
== IEEE_FLOAT_FORMAT
991 && FLOAT_MODE_P (mode
) && ! flag_fast_math
)
994 /* We can't assume x-x is 0 even with non-IEEE floating point,
995 but since it is zero except in very strange circumstances, we
996 will treat it as zero with -ffast-math. */
997 if (rtx_equal_p (op0
, op1
)
998 && ! side_effects_p (op0
)
999 && (! FLOAT_MODE_P (mode
) || flag_fast_math
))
1000 return CONST0_RTX (mode
);
1002 /* Change subtraction from zero into negation. */
1003 if (op0
== CONST0_RTX (mode
))
1004 return gen_rtx_NEG (mode
, op1
);
1006 /* (-1 - a) is ~a. */
1007 if (op0
== constm1_rtx
)
1008 return gen_rtx_NOT (mode
, op1
);
1010 /* Subtracting 0 has no effect. */
1011 if (op1
== CONST0_RTX (mode
))
1014 /* See if this is something like X * C - X or vice versa or
1015 if the multiplication is written as a shift. If so, we can
1016 distribute and make a new multiply, shift, or maybe just
1017 have X (if C is 2 in the example above). But don't make
1018 real multiply if we didn't have one before. */
1020 if (! FLOAT_MODE_P (mode
))
1022 HOST_WIDE_INT coeff0
= 1, coeff1
= 1;
1023 rtx lhs
= op0
, rhs
= op1
;
1026 if (GET_CODE (lhs
) == NEG
)
1027 coeff0
= -1, lhs
= XEXP (lhs
, 0);
1028 else if (GET_CODE (lhs
) == MULT
1029 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
)
1031 coeff0
= INTVAL (XEXP (lhs
, 1)), lhs
= XEXP (lhs
, 0);
1034 else if (GET_CODE (lhs
) == ASHIFT
1035 && GET_CODE (XEXP (lhs
, 1)) == CONST_INT
1036 && INTVAL (XEXP (lhs
, 1)) >= 0
1037 && INTVAL (XEXP (lhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1039 coeff0
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (lhs
, 1));
1040 lhs
= XEXP (lhs
, 0);
1043 if (GET_CODE (rhs
) == NEG
)
1044 coeff1
= - 1, rhs
= XEXP (rhs
, 0);
1045 else if (GET_CODE (rhs
) == MULT
1046 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
)
1048 coeff1
= INTVAL (XEXP (rhs
, 1)), rhs
= XEXP (rhs
, 0);
1051 else if (GET_CODE (rhs
) == ASHIFT
1052 && GET_CODE (XEXP (rhs
, 1)) == CONST_INT
1053 && INTVAL (XEXP (rhs
, 1)) >= 0
1054 && INTVAL (XEXP (rhs
, 1)) < HOST_BITS_PER_WIDE_INT
)
1056 coeff1
= ((HOST_WIDE_INT
) 1) << INTVAL (XEXP (rhs
, 1));
1057 rhs
= XEXP (rhs
, 0);
1060 if (rtx_equal_p (lhs
, rhs
))
1062 tem
= simplify_gen_binary (MULT
, mode
, lhs
,
1063 GEN_INT (coeff0
- coeff1
));
1064 return (GET_CODE (tem
) == MULT
&& ! had_mult
) ? 0 : tem
;
1068 /* (a - (-b)) -> (a + b). */
1069 if (GET_CODE (op1
) == NEG
)
1070 return simplify_gen_binary (PLUS
, mode
, op0
, XEXP (op1
, 0));
1072 /* If one of the operands is a PLUS or a MINUS, see if we can
1073 simplify this by the associative law.
1074 Don't use the associative law for floating point.
1075 The inaccuracy makes it nonassociative,
1076 and subtle programs can break if operations are associated. */
1078 if (INTEGRAL_MODE_P (mode
)
1079 && (GET_CODE (op0
) == PLUS
|| GET_CODE (op0
) == MINUS
1080 || GET_CODE (op1
) == PLUS
|| GET_CODE (op1
) == MINUS
)
1081 && (tem
= simplify_plus_minus (code
, mode
, op0
, op1
)) != 0)
1084 /* Don't let a relocatable value get a negative coeff. */
1085 if (GET_CODE (op1
) == CONST_INT
&& GET_MODE (op0
) != VOIDmode
)
1086 return plus_constant (op0
, - INTVAL (op1
));
1088 /* (x - (x & y)) -> (x & ~y) */
1089 if (GET_CODE (op1
) == AND
)
1091 if (rtx_equal_p (op0
, XEXP (op1
, 0)))
1092 return simplify_gen_binary (AND
, mode
, op0
,
1093 gen_rtx_NOT (mode
, XEXP (op1
, 1)));
1094 if (rtx_equal_p (op0
, XEXP (op1
, 1)))
1095 return simplify_gen_binary (AND
, mode
, op0
,
1096 gen_rtx_NOT (mode
, XEXP (op1
, 0)));
1101 if (op1
== constm1_rtx
)
1103 tem
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
1105 return tem
? tem
: gen_rtx_NEG (mode
, op0
);
1108 /* In IEEE floating point, x*0 is not always 0. */
1109 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1110 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1111 && op1
== CONST0_RTX (mode
)
1112 && ! side_effects_p (op0
))
1115 /* In IEEE floating point, x*1 is not equivalent to x for nans.
1116 However, ANSI says we can drop signals,
1117 so we can do this anyway. */
1118 if (op1
== CONST1_RTX (mode
))
1121 /* Convert multiply by constant power of two into shift unless
1122 we are still generating RTL. This test is a kludge. */
1123 if (GET_CODE (op1
) == CONST_INT
1124 && (val
= exact_log2 (INTVAL (op1
))) >= 0
1125 /* If the mode is larger than the host word size, and the
1126 uppermost bit is set, then this isn't a power of two due
1127 to implicit sign extension. */
1128 && (width
<= HOST_BITS_PER_WIDE_INT
1129 || val
!= HOST_BITS_PER_WIDE_INT
- 1)
1130 && ! rtx_equal_function_value_matters
)
1131 return gen_rtx_ASHIFT (mode
, op0
, GEN_INT (val
));
1133 if (GET_CODE (op1
) == CONST_DOUBLE
1134 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
)
1138 int op1is2
, op1ism1
;
1140 if (setjmp (handler
))
1143 set_float_handler (handler
);
1144 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1145 op1is2
= REAL_VALUES_EQUAL (d
, dconst2
);
1146 op1ism1
= REAL_VALUES_EQUAL (d
, dconstm1
);
1147 set_float_handler (NULL_PTR
);
1149 /* x*2 is x+x and x*(-1) is -x */
1150 if (op1is2
&& GET_MODE (op0
) == mode
)
1151 return gen_rtx_PLUS (mode
, op0
, copy_rtx (op0
));
1153 else if (op1ism1
&& GET_MODE (op0
) == mode
)
1154 return gen_rtx_NEG (mode
, op0
);
1159 if (op1
== const0_rtx
)
1161 if (GET_CODE (op1
) == CONST_INT
1162 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1164 if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1166 /* A | (~A) -> -1 */
1167 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1168 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1169 && ! side_effects_p (op0
)
1170 && GET_MODE_CLASS (mode
) != MODE_CC
)
1175 if (op1
== const0_rtx
)
1177 if (GET_CODE (op1
) == CONST_INT
1178 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1179 return gen_rtx_NOT (mode
, op0
);
1180 if (op0
== op1
&& ! side_effects_p (op0
)
1181 && GET_MODE_CLASS (mode
) != MODE_CC
)
1186 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1188 if (GET_CODE (op1
) == CONST_INT
1189 && (INTVAL (op1
) & GET_MODE_MASK (mode
)) == GET_MODE_MASK (mode
))
1191 if (op0
== op1
&& ! side_effects_p (op0
)
1192 && GET_MODE_CLASS (mode
) != MODE_CC
)
1195 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1196 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1197 && ! side_effects_p (op0
)
1198 && GET_MODE_CLASS (mode
) != MODE_CC
)
1203 /* Convert divide by power of two into shift (divide by 1 handled
1205 if (GET_CODE (op1
) == CONST_INT
1206 && (arg1
= exact_log2 (INTVAL (op1
))) > 0)
1207 return gen_rtx_LSHIFTRT (mode
, op0
, GEN_INT (arg1
));
1209 /* ... fall through ... */
1212 if (op1
== CONST1_RTX (mode
))
1215 /* In IEEE floating point, 0/x is not always 0. */
1216 if ((TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1217 || ! FLOAT_MODE_P (mode
) || flag_fast_math
)
1218 && op0
== CONST0_RTX (mode
)
1219 && ! side_effects_p (op1
))
1222 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1223 /* Change division by a constant into multiplication. Only do
1224 this with -ffast-math until an expert says it is safe in
1226 else if (GET_CODE (op1
) == CONST_DOUBLE
1227 && GET_MODE_CLASS (GET_MODE (op1
)) == MODE_FLOAT
1228 && op1
!= CONST0_RTX (mode
)
1232 REAL_VALUE_FROM_CONST_DOUBLE (d
, op1
);
1234 if (! REAL_VALUES_EQUAL (d
, dconst0
))
1236 #if defined (REAL_ARITHMETIC)
1237 REAL_ARITHMETIC (d
, rtx_to_tree_code (DIV
), dconst1
, d
);
1238 return gen_rtx_MULT (mode
, op0
,
1239 CONST_DOUBLE_FROM_REAL_VALUE (d
, mode
));
1242 gen_rtx_MULT (mode
, op0
,
1243 CONST_DOUBLE_FROM_REAL_VALUE (1./d
, mode
));
1251 /* Handle modulus by power of two (mod with 1 handled below). */
1252 if (GET_CODE (op1
) == CONST_INT
1253 && exact_log2 (INTVAL (op1
)) > 0)
1254 return gen_rtx_AND (mode
, op0
, GEN_INT (INTVAL (op1
) - 1));
1256 /* ... fall through ... */
1259 if ((op0
== const0_rtx
|| op1
== const1_rtx
)
1260 && ! side_effects_p (op0
) && ! side_effects_p (op1
))
1266 /* Rotating ~0 always results in ~0. */
1267 if (GET_CODE (op0
) == CONST_INT
&& width
<= HOST_BITS_PER_WIDE_INT
1268 && (unsigned HOST_WIDE_INT
) INTVAL (op0
) == GET_MODE_MASK (mode
)
1269 && ! side_effects_p (op1
))
1272 /* ... fall through ... */
1277 if (op1
== const0_rtx
)
1279 if (op0
== const0_rtx
&& ! side_effects_p (op1
))
1284 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1285 && INTVAL (op1
) == (HOST_WIDE_INT
) 1 << (width
-1)
1286 && ! side_effects_p (op0
))
1288 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1293 if (width
<= HOST_BITS_PER_WIDE_INT
&& GET_CODE (op1
) == CONST_INT
1294 && ((unsigned HOST_WIDE_INT
) INTVAL (op1
)
1295 == (unsigned HOST_WIDE_INT
) GET_MODE_MASK (mode
) >> 1)
1296 && ! side_effects_p (op0
))
1298 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1303 if (op1
== const0_rtx
&& ! side_effects_p (op0
))
1305 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1310 if (op1
== constm1_rtx
&& ! side_effects_p (op0
))
1312 else if (rtx_equal_p (op0
, op1
) && ! side_effects_p (op0
))
1323 /* Get the integer argument values in two forms:
1324 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1326 arg0
= INTVAL (op0
);
1327 arg1
= INTVAL (op1
);
1329 if (width
< HOST_BITS_PER_WIDE_INT
)
1331 arg0
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1332 arg1
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1335 if (arg0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1336 arg0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1339 if (arg1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1340 arg1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1348 /* Compute the value of the arithmetic. */
1353 val
= arg0s
+ arg1s
;
1357 val
= arg0s
- arg1s
;
1361 val
= arg0s
* arg1s
;
1367 val
= arg0s
/ arg1s
;
1373 val
= arg0s
% arg1s
;
1379 val
= (unsigned HOST_WIDE_INT
) arg0
/ arg1
;
1385 val
= (unsigned HOST_WIDE_INT
) arg0
% arg1
;
1401 /* If shift count is undefined, don't fold it; let the machine do
1402 what it wants. But truncate it if the machine will do that. */
1406 #ifdef SHIFT_COUNT_TRUNCATED
1407 if (SHIFT_COUNT_TRUNCATED
)
1411 val
= ((unsigned HOST_WIDE_INT
) arg0
) >> arg1
;
1418 #ifdef SHIFT_COUNT_TRUNCATED
1419 if (SHIFT_COUNT_TRUNCATED
)
1423 val
= ((unsigned HOST_WIDE_INT
) arg0
) << arg1
;
1430 #ifdef SHIFT_COUNT_TRUNCATED
1431 if (SHIFT_COUNT_TRUNCATED
)
1435 val
= arg0s
>> arg1
;
1437 /* Bootstrap compiler may not have sign extended the right shift.
1438 Manually extend the sign to insure bootstrap cc matches gcc. */
1439 if (arg0s
< 0 && arg1
> 0)
1440 val
|= ((HOST_WIDE_INT
) -1) << (HOST_BITS_PER_WIDE_INT
- arg1
);
1449 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << (width
- arg1
))
1450 | (((unsigned HOST_WIDE_INT
) arg0
) >> arg1
));
1458 val
= ((((unsigned HOST_WIDE_INT
) arg0
) << arg1
)
1459 | (((unsigned HOST_WIDE_INT
) arg0
) >> (width
- arg1
)));
1463 /* Do nothing here. */
1467 val
= arg0s
<= arg1s
? arg0s
: arg1s
;
1471 val
= ((unsigned HOST_WIDE_INT
) arg0
1472 <= (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1476 val
= arg0s
> arg1s
? arg0s
: arg1s
;
1480 val
= ((unsigned HOST_WIDE_INT
) arg0
1481 > (unsigned HOST_WIDE_INT
) arg1
? arg0
: arg1
);
1488 val
= trunc_int_for_mode (val
, mode
);
1490 return GEN_INT (val
);
1493 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1496 Rather than test for specific case, we do this by a brute-force method
1497 and do all possible simplifications until no more changes occur. Then
1498 we rebuild the operation. */
1501 simplify_plus_minus (code
, mode
, op0
, op1
)
1503 enum machine_mode mode
;
1509 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
= 0;
1510 int first
= 1, negate
= 0, changed
;
1513 memset ((char *) ops
, 0, sizeof ops
);
1515 /* Set up the two operands and then expand them until nothing has been
1516 changed. If we run out of room in our array, give up; this should
1517 almost never happen. */
1519 ops
[0] = op0
, ops
[1] = op1
, negs
[0] = 0, negs
[1] = (code
== MINUS
);
1526 for (i
= 0; i
< n_ops
; i
++)
1527 switch (GET_CODE (ops
[i
]))
1534 ops
[n_ops
] = XEXP (ops
[i
], 1);
1535 negs
[n_ops
++] = GET_CODE (ops
[i
]) == MINUS
? !negs
[i
] : negs
[i
];
1536 ops
[i
] = XEXP (ops
[i
], 0);
1542 ops
[i
] = XEXP (ops
[i
], 0);
1543 negs
[i
] = ! negs
[i
];
1548 ops
[i
] = XEXP (ops
[i
], 0);
1554 /* ~a -> (-a - 1) */
1557 ops
[n_ops
] = constm1_rtx
;
1558 negs
[n_ops
++] = negs
[i
];
1559 ops
[i
] = XEXP (ops
[i
], 0);
1560 negs
[i
] = ! negs
[i
];
1567 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0, changed
= 1;
1575 /* If we only have two operands, we can't do anything. */
1579 /* Now simplify each pair of operands until nothing changes. The first
1580 time through just simplify constants against each other. */
1587 for (i
= 0; i
< n_ops
- 1; i
++)
1588 for (j
= i
+ 1; j
< n_ops
; j
++)
1589 if (ops
[i
] != 0 && ops
[j
] != 0
1590 && (! first
|| (CONSTANT_P (ops
[i
]) && CONSTANT_P (ops
[j
]))))
1592 rtx lhs
= ops
[i
], rhs
= ops
[j
];
1593 enum rtx_code ncode
= PLUS
;
1595 if (negs
[i
] && ! negs
[j
])
1596 lhs
= ops
[j
], rhs
= ops
[i
], ncode
= MINUS
;
1597 else if (! negs
[i
] && negs
[j
])
1600 tem
= simplify_binary_operation (ncode
, mode
, lhs
, rhs
);
1603 ops
[i
] = tem
, ops
[j
] = 0;
1604 negs
[i
] = negs
[i
] && negs
[j
];
1605 if (GET_CODE (tem
) == NEG
)
1606 ops
[i
] = XEXP (tem
, 0), negs
[i
] = ! negs
[i
];
1608 if (GET_CODE (ops
[i
]) == CONST_INT
&& negs
[i
])
1609 ops
[i
] = GEN_INT (- INTVAL (ops
[i
])), negs
[i
] = 0;
1617 /* Pack all the operands to the lower-numbered entries and give up if
1618 we didn't reduce the number of operands we had. Make sure we
1619 count a CONST as two operands. If we have the same number of
1620 operands, but have made more CONSTs than we had, this is also
1621 an improvement, so accept it. */
1623 for (i
= 0, j
= 0; j
< n_ops
; j
++)
1626 ops
[i
] = ops
[j
], negs
[i
++] = negs
[j
];
1627 if (GET_CODE (ops
[j
]) == CONST
)
1631 if (i
+ n_consts
> input_ops
1632 || (i
+ n_consts
== input_ops
&& n_consts
<= input_consts
))
1637 /* If we have a CONST_INT, put it last. */
1638 for (i
= 0; i
< n_ops
- 1; i
++)
1639 if (GET_CODE (ops
[i
]) == CONST_INT
)
1641 tem
= ops
[n_ops
- 1], ops
[n_ops
- 1] = ops
[i
] , ops
[i
] = tem
;
1642 j
= negs
[n_ops
- 1], negs
[n_ops
- 1] = negs
[i
], negs
[i
] = j
;
1645 /* Put a non-negated operand first. If there aren't any, make all
1646 operands positive and negate the whole thing later. */
1647 for (i
= 0; i
< n_ops
&& negs
[i
]; i
++)
1652 for (i
= 0; i
< n_ops
; i
++)
1658 tem
= ops
[0], ops
[0] = ops
[i
], ops
[i
] = tem
;
1659 j
= negs
[0], negs
[0] = negs
[i
], negs
[i
] = j
;
1662 /* Now make the result by performing the requested operations. */
1664 for (i
= 1; i
< n_ops
; i
++)
1665 result
= simplify_gen_binary (negs
[i
] ? MINUS
: PLUS
, mode
, result
, ops
[i
]);
1667 return negate
? gen_rtx_NEG (mode
, result
) : result
;
1672 rtx op0
, op1
; /* Input */
1673 int equal
, op0lt
, op1lt
; /* Output */
1677 check_fold_consts (data
)
1680 struct cfc_args
*args
= (struct cfc_args
*) data
;
1681 REAL_VALUE_TYPE d0
, d1
;
1683 REAL_VALUE_FROM_CONST_DOUBLE (d0
, args
->op0
);
1684 REAL_VALUE_FROM_CONST_DOUBLE (d1
, args
->op1
);
1685 args
->equal
= REAL_VALUES_EQUAL (d0
, d1
);
1686 args
->op0lt
= REAL_VALUES_LESS (d0
, d1
);
1687 args
->op1lt
= REAL_VALUES_LESS (d1
, d0
);
1690 /* Like simplify_binary_operation except used for relational operators.
1691 MODE is the mode of the operands, not that of the result. If MODE
1692 is VOIDmode, both operands must also be VOIDmode and we compare the
1693 operands in "infinite precision".
1695 If no simplification is possible, this function returns zero. Otherwise,
1696 it returns either const_true_rtx or const0_rtx. */
1699 simplify_relational_operation (code
, mode
, op0
, op1
)
1701 enum machine_mode mode
;
1704 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
1707 if (mode
== VOIDmode
1708 && (GET_MODE (op0
) != VOIDmode
1709 || GET_MODE (op1
) != VOIDmode
))
1712 /* If op0 is a compare, extract the comparison arguments from it. */
1713 if (GET_CODE (op0
) == COMPARE
&& op1
== const0_rtx
)
1714 op1
= XEXP (op0
, 1), op0
= XEXP (op0
, 0);
1716 /* We can't simplify MODE_CC values since we don't know what the
1717 actual comparison is. */
1718 if (GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
1725 /* Make sure the constant is second. */
1726 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
1727 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
1729 tem
= op0
, op0
= op1
, op1
= tem
;
1730 code
= swap_condition (code
);
1733 /* For integer comparisons of A and B maybe we can simplify A - B and can
1734 then simplify a comparison of that with zero. If A and B are both either
1735 a register or a CONST_INT, this can't help; testing for these cases will
1736 prevent infinite recursion here and speed things up.
1738 If CODE is an unsigned comparison, then we can never do this optimization,
1739 because it gives an incorrect result if the subtraction wraps around zero.
1740 ANSI C defines unsigned operations such that they never overflow, and
1741 thus such cases can not be ignored. */
1743 if (INTEGRAL_MODE_P (mode
) && op1
!= const0_rtx
1744 && ! ((GET_CODE (op0
) == REG
|| GET_CODE (op0
) == CONST_INT
)
1745 && (GET_CODE (op1
) == REG
|| GET_CODE (op1
) == CONST_INT
))
1746 && 0 != (tem
= simplify_binary_operation (MINUS
, mode
, op0
, op1
))
1747 && code
!= GTU
&& code
!= GEU
&& code
!= LTU
&& code
!= LEU
)
1748 return simplify_relational_operation (signed_condition (code
),
1749 mode
, tem
, const0_rtx
);
1751 /* For non-IEEE floating-point, if the two operands are equal, we know the
1753 if (rtx_equal_p (op0
, op1
)
1754 && (TARGET_FLOAT_FORMAT
!= IEEE_FLOAT_FORMAT
1755 || ! FLOAT_MODE_P (GET_MODE (op0
)) || flag_fast_math
))
1756 equal
= 1, op0lt
= 0, op0ltu
= 0, op1lt
= 0, op1ltu
= 0;
1758 /* If the operands are floating-point constants, see if we can fold
1760 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1761 else if (GET_CODE (op0
) == CONST_DOUBLE
&& GET_CODE (op1
) == CONST_DOUBLE
1762 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_FLOAT
)
1764 struct cfc_args args
;
1766 /* Setup input for check_fold_consts() */
1770 if (do_float_handler(check_fold_consts
, (PTR
) &args
) == 0)
1771 /* We got an exception from check_fold_consts() */
1774 /* Receive output from check_fold_consts() */
1776 op0lt
= op0ltu
= args
.op0lt
;
1777 op1lt
= op1ltu
= args
.op1lt
;
1779 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1781 /* Otherwise, see if the operands are both integers. */
1782 else if ((GET_MODE_CLASS (mode
) == MODE_INT
|| mode
== VOIDmode
)
1783 && (GET_CODE (op0
) == CONST_DOUBLE
|| GET_CODE (op0
) == CONST_INT
)
1784 && (GET_CODE (op1
) == CONST_DOUBLE
|| GET_CODE (op1
) == CONST_INT
))
1786 int width
= GET_MODE_BITSIZE (mode
);
1787 HOST_WIDE_INT l0s
, h0s
, l1s
, h1s
;
1788 unsigned HOST_WIDE_INT l0u
, h0u
, l1u
, h1u
;
1790 /* Get the two words comprising each integer constant. */
1791 if (GET_CODE (op0
) == CONST_DOUBLE
)
1793 l0u
= l0s
= CONST_DOUBLE_LOW (op0
);
1794 h0u
= h0s
= CONST_DOUBLE_HIGH (op0
);
1798 l0u
= l0s
= INTVAL (op0
);
1799 h0u
= h0s
= HWI_SIGN_EXTEND (l0s
);
1802 if (GET_CODE (op1
) == CONST_DOUBLE
)
1804 l1u
= l1s
= CONST_DOUBLE_LOW (op1
);
1805 h1u
= h1s
= CONST_DOUBLE_HIGH (op1
);
1809 l1u
= l1s
= INTVAL (op1
);
1810 h1u
= h1s
= HWI_SIGN_EXTEND (l1s
);
1813 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
1814 we have to sign or zero-extend the values. */
1815 if (width
!= 0 && width
< HOST_BITS_PER_WIDE_INT
)
1817 l0u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1818 l1u
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1820 if (l0s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1821 l0s
|= ((HOST_WIDE_INT
) (-1) << width
);
1823 if (l1s
& ((HOST_WIDE_INT
) 1 << (width
- 1)))
1824 l1s
|= ((HOST_WIDE_INT
) (-1) << width
);
1826 if (width
!= 0 && width
<= HOST_BITS_PER_WIDE_INT
)
1827 h0u
= h1u
= 0, h0s
= HWI_SIGN_EXTEND (l0s
), h1s
= HWI_SIGN_EXTEND (l1s
);
1829 equal
= (h0u
== h1u
&& l0u
== l1u
);
1830 op0lt
= (h0s
< h1s
|| (h0s
== h1s
&& l0u
< l1u
));
1831 op1lt
= (h1s
< h0s
|| (h1s
== h0s
&& l1u
< l0u
));
1832 op0ltu
= (h0u
< h1u
|| (h0u
== h1u
&& l0u
< l1u
));
1833 op1ltu
= (h1u
< h0u
|| (h1u
== h0u
&& l1u
< l0u
));
1836 /* Otherwise, there are some code-specific tests we can make. */
1842 /* References to the frame plus a constant or labels cannot
1843 be zero, but a SYMBOL_REF can due to #pragma weak. */
1844 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1845 || GET_CODE (op0
) == LABEL_REF
)
1846 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1847 /* On some machines, the ap reg can be 0 sometimes. */
1848 && op0
!= arg_pointer_rtx
1855 if (((NONZERO_BASE_PLUS_P (op0
) && op1
== const0_rtx
)
1856 || GET_CODE (op0
) == LABEL_REF
)
1857 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1858 && op0
!= arg_pointer_rtx
1861 return const_true_rtx
;
1865 /* Unsigned values are never negative. */
1866 if (op1
== const0_rtx
)
1867 return const_true_rtx
;
1871 if (op1
== const0_rtx
)
1876 /* Unsigned values are never greater than the largest
1878 if (GET_CODE (op1
) == CONST_INT
1879 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1880 && INTEGRAL_MODE_P (mode
))
1881 return const_true_rtx
;
1885 if (GET_CODE (op1
) == CONST_INT
1886 && (unsigned HOST_WIDE_INT
) INTVAL (op1
) == GET_MODE_MASK (mode
)
1887 && INTEGRAL_MODE_P (mode
))
1898 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
1903 return equal
? const_true_rtx
: const0_rtx
;
1905 return ! equal
? const_true_rtx
: const0_rtx
;
1907 return op0lt
? const_true_rtx
: const0_rtx
;
1909 return op1lt
? const_true_rtx
: const0_rtx
;
1911 return op0ltu
? const_true_rtx
: const0_rtx
;
1913 return op1ltu
? const_true_rtx
: const0_rtx
;
1915 return equal
|| op0lt
? const_true_rtx
: const0_rtx
;
1917 return equal
|| op1lt
? const_true_rtx
: const0_rtx
;
1919 return equal
|| op0ltu
? const_true_rtx
: const0_rtx
;
1921 return equal
|| op1ltu
? const_true_rtx
: const0_rtx
;
1927 /* Simplify CODE, an operation with result mode MODE and three operands,
1928 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
1929 a constant. Return 0 if no simplifications is possible. */
1932 simplify_ternary_operation (code
, mode
, op0_mode
, op0
, op1
, op2
)
1934 enum machine_mode mode
, op0_mode
;
1937 unsigned int width
= GET_MODE_BITSIZE (mode
);
1939 /* VOIDmode means "infinite" precision. */
1941 width
= HOST_BITS_PER_WIDE_INT
;
1947 if (GET_CODE (op0
) == CONST_INT
1948 && GET_CODE (op1
) == CONST_INT
1949 && GET_CODE (op2
) == CONST_INT
1950 && ((unsigned) INTVAL (op1
) + (unsigned) INTVAL (op2
) <= width
)
1951 && width
<= (unsigned) HOST_BITS_PER_WIDE_INT
)
1953 /* Extracting a bit-field from a constant */
1954 HOST_WIDE_INT val
= INTVAL (op0
);
1956 if (BITS_BIG_ENDIAN
)
1957 val
>>= (GET_MODE_BITSIZE (op0_mode
)
1958 - INTVAL (op2
) - INTVAL (op1
));
1960 val
>>= INTVAL (op2
);
1962 if (HOST_BITS_PER_WIDE_INT
!= INTVAL (op1
))
1964 /* First zero-extend. */
1965 val
&= ((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1;
1966 /* If desired, propagate sign bit. */
1967 if (code
== SIGN_EXTRACT
1968 && (val
& ((HOST_WIDE_INT
) 1 << (INTVAL (op1
) - 1))))
1969 val
|= ~ (((HOST_WIDE_INT
) 1 << INTVAL (op1
)) - 1);
1972 /* Clear the bits that don't belong in our mode,
1973 unless they and our sign bit are all one.
1974 So we get either a reasonable negative value or a reasonable
1975 unsigned value for this mode. */
1976 if (width
< HOST_BITS_PER_WIDE_INT
1977 && ((val
& ((HOST_WIDE_INT
) (-1) << (width
- 1)))
1978 != ((HOST_WIDE_INT
) (-1) << (width
- 1))))
1979 val
&= ((HOST_WIDE_INT
) 1 << width
) - 1;
1981 return GEN_INT (val
);
1986 if (GET_CODE (op0
) == CONST_INT
)
1987 return op0
!= const0_rtx
? op1
: op2
;
1989 /* Convert a == b ? b : a to "a". */
1990 if (GET_CODE (op0
) == NE
&& ! side_effects_p (op0
)
1991 && (! FLOAT_MODE_P (mode
) || flag_fast_math
)
1992 && rtx_equal_p (XEXP (op0
, 0), op1
)
1993 && rtx_equal_p (XEXP (op0
, 1), op2
))
1995 else if (GET_CODE (op0
) == EQ
&& ! side_effects_p (op0
)
1996 && (! FLOAT_MODE_P (mode
) || flag_fast_math
)
1997 && rtx_equal_p (XEXP (op0
, 1), op1
)
1998 && rtx_equal_p (XEXP (op0
, 0), op2
))
2000 else if (GET_RTX_CLASS (GET_CODE (op0
)) == '<' && ! side_effects_p (op0
))
2002 enum machine_mode cmp_mode
= (GET_MODE (XEXP (op0
, 0)) == VOIDmode
2003 ? GET_MODE (XEXP (op0
, 1))
2004 : GET_MODE (XEXP (op0
, 0)));
2006 = simplify_relational_operation (GET_CODE (op0
), cmp_mode
,
2007 XEXP (op0
, 0), XEXP (op0
, 1));
2009 /* See if any simplifications were possible. */
2010 if (temp
== const0_rtx
)
2012 else if (temp
== const1_rtx
)
2017 /* Look for happy constants in op1 and op2. */
2018 if (GET_CODE (op1
) == CONST_INT
&& GET_CODE (op2
) == CONST_INT
)
2020 HOST_WIDE_INT t
= INTVAL (op1
);
2021 HOST_WIDE_INT f
= INTVAL (op2
);
2023 if (t
== STORE_FLAG_VALUE
&& f
== 0)
2024 code
= GET_CODE (op0
);
2025 else if (t
== 0 && f
== STORE_FLAG_VALUE
2026 && can_reverse_comparison_p (op0
, NULL_RTX
))
2027 code
= reverse_condition (GET_CODE (op0
));
2031 return gen_rtx_fmt_ee (code
, mode
, XEXP (op0
, 0), XEXP (op0
, 1));
2043 /* Simplify X, an rtx expression.
2045 Return the simplified expression or NULL if no simplifications
2048 This is the preferred entry point into the simplification routines;
2049 however, we still allow passes to call the more specific routines.
2051 Right now GCC has three (yes, three) major bodies of RTL simplficiation
2052 code that need to be unified.
2054 1. fold_rtx in cse.c. This code uses various CSE specific
2055 information to aid in RTL simplification.
2057 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2058 it uses combine specific information to aid in RTL
2061 3. The routines in this file.
2064 Long term we want to only have one body of simplification code; to
2065 get to that state I recommend the following steps:
2067 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2068 which are not pass dependent state into these routines.
2070 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2071 use this routine whenever possible.
2073 3. Allow for pass dependent state to be provided to these
2074 routines and add simplifications based on the pass dependent
2075 state. Remove code from cse.c & combine.c that becomes
2078 It will take time, but ultimately the compiler will be easier to
2079 maintain and improve. It's totally silly that when we add a
2080 simplification that it needs to be added to 4 places (3 for RTL
2081 simplification and 1 for tree simplification. */
2088 enum machine_mode mode
;
2090 mode
= GET_MODE (x
);
2091 code
= GET_CODE (x
);
2093 switch (GET_RTX_CLASS (code
))
2096 return simplify_unary_operation (code
, mode
,
2097 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
2100 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
2104 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
2105 XEXP (x
, 0), XEXP (x
, 1), XEXP (x
, 2));
2108 return simplify_relational_operation (code
,
2109 (GET_MODE (XEXP (x
, 0)) != VOIDmode
2110 ? GET_MODE (XEXP (x
, 0))
2111 : GET_MODE (XEXP (x
, 1))),
2112 XEXP (x
, 0), XEXP (x
, 1));
2119 /* Allocate a struct elt_list and fill in its two elements with the
2122 static struct elt_list
*
2123 new_elt_list (next
, elt
)
2124 struct elt_list
*next
;
2127 struct elt_list
*el
= empty_elt_lists
;
2130 empty_elt_lists
= el
->next
;
2132 el
= (struct elt_list
*) obstack_alloc (&cselib_obstack
,
2133 sizeof (struct elt_list
));
2139 /* Allocate a struct elt_loc_list and fill in its two elements with the
2142 static struct elt_loc_list
*
2143 new_elt_loc_list (next
, loc
)
2144 struct elt_loc_list
*next
;
2147 struct elt_loc_list
*el
= empty_elt_loc_lists
;
2150 empty_elt_loc_lists
= el
->next
;
2152 el
= (struct elt_loc_list
*) obstack_alloc (&cselib_obstack
,
2153 sizeof (struct elt_loc_list
));
2156 el
->setting_insn
= cselib_current_insn
;
2160 /* The elt_list at *PL is no longer needed. Unchain it and free its
2164 unchain_one_elt_list (pl
)
2165 struct elt_list
**pl
;
2167 struct elt_list
*l
= *pl
;
2170 l
->next
= empty_elt_lists
;
2171 empty_elt_lists
= l
;
2174 /* Likewise for elt_loc_lists. */
2177 unchain_one_elt_loc_list (pl
)
2178 struct elt_loc_list
**pl
;
2180 struct elt_loc_list
*l
= *pl
;
2183 l
->next
= empty_elt_loc_lists
;
2184 empty_elt_loc_lists
= l
;
2187 /* Likewise for cselib_vals. This also frees the addr_list associated with
2191 unchain_one_value (v
)
2194 while (v
->addr_list
)
2195 unchain_one_elt_list (&v
->addr_list
);
2197 v
->u
.next_free
= empty_vals
;
2201 /* Remove all entries from the hash table. Also used during
2209 for (i
= 0; i
< cselib_nregs
; i
++)
2212 htab_empty (hash_table
);
2213 obstack_free (&cselib_obstack
, cselib_startobj
);
2216 empty_elt_lists
= 0;
2217 empty_elt_loc_lists
= 0;
2218 n_useless_values
= 0;
2220 next_unknown_value
= 0;
2223 /* The equality test for our hash table. The first argument ENTRY is a table
2224 element (i.e. a cselib_val), while the second arg X is an rtx. */
2227 entry_and_rtx_equal_p (entry
, x_arg
)
2228 const void *entry
, *x_arg
;
2230 struct elt_loc_list
*l
;
2231 const cselib_val
*v
= (const cselib_val
*) entry
;
2232 rtx x
= (rtx
) x_arg
;
2234 /* We don't guarantee that distinct rtx's have different hash values,
2235 so we need to do a comparison. */
2236 for (l
= v
->locs
; l
; l
= l
->next
)
2237 if (rtx_equal_for_cselib_p (l
->loc
, x
))
2243 /* The hash function for our hash table. The value is always computed with
2244 hash_rtx when adding an element; this function just extracts the hash
2245 value from a cselib_val structure. */
2248 get_value_hash (entry
)
2251 const cselib_val
*v
= (const cselib_val
*) entry
;
2255 /* Return true if X contains a VALUE rtx. If ONLY_USELESS is set, we
2256 only return true for values which point to a cselib_val whose value
2257 element has been set to zero, which implies the cselib_val will be
2261 references_value_p (x
, only_useless
)
2265 enum rtx_code code
= GET_CODE (x
);
2266 const char *fmt
= GET_RTX_FORMAT (code
);
2269 if (GET_CODE (x
) == VALUE
2270 && (! only_useless
|| CSELIB_VAL_PTR (x
)->locs
== 0))
2273 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2275 if (fmt
[i
] == 'e' && references_value_p (XEXP (x
, i
), only_useless
))
2277 else if (fmt
[i
] == 'E')
2278 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2279 if (references_value_p (XVECEXP (x
, i
, j
), only_useless
))
2286 /* For all locations found in X, delete locations that reference useless
2287 values (i.e. values without any location). Called through
2291 discard_useless_locs (x
, info
)
2293 void *info ATTRIBUTE_UNUSED
;
2295 cselib_val
*v
= (cselib_val
*)*x
;
2296 struct elt_loc_list
**p
= &v
->locs
;
2297 int had_locs
= v
->locs
!= 0;
2301 if (references_value_p ((*p
)->loc
, 1))
2302 unchain_one_elt_loc_list (p
);
2307 if (had_locs
&& v
->locs
== 0)
2310 values_became_useless
= 1;
2315 /* If X is a value with no locations, remove it from the hashtable. */
2318 discard_useless_values (x
, info
)
2320 void *info ATTRIBUTE_UNUSED
;
2322 cselib_val
*v
= (cselib_val
*)*x
;
2326 htab_clear_slot (hash_table
, x
);
2327 unchain_one_value (v
);
2334 /* Clean out useless values (i.e. those which no longer have locations
2335 associated with them) from the hash table. */
2338 remove_useless_values ()
2340 /* First pass: eliminate locations that reference the value. That in
2341 turn can make more values useless. */
2344 values_became_useless
= 0;
2345 htab_traverse (hash_table
, discard_useless_locs
, 0);
2347 while (values_became_useless
);
2349 /* Second pass: actually remove the values. */
2350 htab_traverse (hash_table
, discard_useless_values
, 0);
2352 if (n_useless_values
!= 0)
2356 /* Return nonzero if we can prove that X and Y contain the same value, taking
2357 our gathered information into account. */
2360 rtx_equal_for_cselib_p (x
, y
)
2367 if (GET_CODE (x
) == REG
|| GET_CODE (x
) == MEM
)
2369 cselib_val
*e
= cselib_lookup (x
, VOIDmode
, 0);
2375 if (GET_CODE (y
) == REG
|| GET_CODE (y
) == MEM
)
2377 cselib_val
*e
= cselib_lookup (y
, VOIDmode
, 0);
2386 if (GET_CODE (x
) == VALUE
&& GET_CODE (y
) == VALUE
)
2387 return CSELIB_VAL_PTR (x
) == CSELIB_VAL_PTR (y
);
2389 if (GET_CODE (x
) == VALUE
)
2391 cselib_val
*e
= CSELIB_VAL_PTR (x
);
2392 struct elt_loc_list
*l
;
2394 for (l
= e
->locs
; l
; l
= l
->next
)
2398 /* Avoid infinite recursion. */
2399 if (GET_CODE (t
) == REG
|| GET_CODE (t
) == MEM
)
2401 else if (rtx_equal_for_cselib_p (t
, y
))
2408 if (GET_CODE (y
) == VALUE
)
2410 cselib_val
*e
= CSELIB_VAL_PTR (y
);
2411 struct elt_loc_list
*l
;
2413 for (l
= e
->locs
; l
; l
= l
->next
)
2417 if (GET_CODE (t
) == REG
|| GET_CODE (t
) == MEM
)
2419 else if (rtx_equal_for_cselib_p (x
, t
))
2426 if (GET_CODE (x
) != GET_CODE (y
) || GET_MODE (x
) != GET_MODE (y
))
2429 /* This won't be handled correctly by the code below. */
2430 if (GET_CODE (x
) == LABEL_REF
)
2431 return XEXP (x
, 0) == XEXP (y
, 0);
2433 code
= GET_CODE (x
);
2434 fmt
= GET_RTX_FORMAT (code
);
2436 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2443 if (XWINT (x
, i
) != XWINT (y
, i
))
2449 if (XINT (x
, i
) != XINT (y
, i
))
2455 /* Two vectors must have the same length. */
2456 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
2459 /* And the corresponding elements must match. */
2460 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2461 if (! rtx_equal_for_cselib_p (XVECEXP (x
, i
, j
),
2467 if (! rtx_equal_for_cselib_p (XEXP (x
, i
), XEXP (y
, i
)))
2473 if (strcmp (XSTR (x
, i
), XSTR (y
, i
)))
2478 /* These are just backpointers, so they don't matter. */
2485 /* It is believed that rtx's at this level will never
2486 contain anything but integers and other rtx's,
2487 except for within LABEL_REFs and SYMBOL_REFs. */
2495 /* Hash an rtx. Return 0 if we couldn't hash the rtx.
2496 For registers and memory locations, we look up their cselib_val structure
2497 and return its VALUE element.
2498 Possible reasons for return 0 are: the object is volatile, or we couldn't
2499 find a register or memory location in the table and CREATE is zero. If
2500 CREATE is nonzero, table elts are created for regs and mem.
2501 MODE is used in hashing for CONST_INTs only;
2502 otherwise the mode of X is used. */
2505 hash_rtx (x
, mode
, create
)
2507 enum machine_mode mode
;
2514 unsigned int hash
= 0;
2516 /* repeat is used to turn tail-recursion into iteration. */
2518 code
= GET_CODE (x
);
2519 hash
+= (unsigned) code
+ (unsigned) GET_MODE (x
);
2525 e
= cselib_lookup (x
, GET_MODE (x
), create
);
2533 hash
+= ((unsigned) CONST_INT
<< 7) + (unsigned) mode
+ INTVAL (x
);
2534 return hash
? hash
: CONST_INT
;
2537 /* This is like the general case, except that it only counts
2538 the integers representing the constant. */
2539 hash
+= (unsigned) code
+ (unsigned) GET_MODE (x
);
2540 if (GET_MODE (x
) != VOIDmode
)
2541 for (i
= 2; i
< GET_RTX_LENGTH (CONST_DOUBLE
); i
++)
2542 hash
+= XWINT (x
, i
);
2544 hash
+= ((unsigned) CONST_DOUBLE_LOW (x
)
2545 + (unsigned) CONST_DOUBLE_HIGH (x
));
2546 return hash
? hash
: CONST_DOUBLE
;
2548 /* Assume there is only one rtx object for any given label. */
2551 += ((unsigned) LABEL_REF
<< 7) + (unsigned long) XEXP (x
, 0);
2552 return hash
? hash
: LABEL_REF
;
2556 += ((unsigned) SYMBOL_REF
<< 7) + (unsigned long) XSTR (x
, 0);
2557 return hash
? hash
: SYMBOL_REF
;
2568 case UNSPEC_VOLATILE
:
2572 if (MEM_VOLATILE_P (x
))
2581 i
= GET_RTX_LENGTH (code
) - 1;
2582 fmt
= GET_RTX_FORMAT (code
);
2587 rtx tem
= XEXP (x
, i
);
2588 unsigned int tem_hash
;
2590 /* If we are about to do the last recursive call
2591 needed at this level, change it into iteration.
2592 This function is called enough to be worth it. */
2599 tem_hash
= hash_rtx (tem
, 0, create
);
2605 else if (fmt
[i
] == 'E')
2606 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2608 unsigned int tem_hash
= hash_rtx (XVECEXP (x
, i
, j
), 0, create
);
2615 else if (fmt
[i
] == 's')
2617 const unsigned char *p
= (const unsigned char *) XSTR (x
, i
);
2623 else if (fmt
[i
] == 'i')
2624 hash
+= XINT (x
, i
);
2625 else if (fmt
[i
] == '0' || fmt
[i
] == 't')
2631 return hash
? hash
: 1 + GET_CODE (x
);
2634 /* Create a new value structure for VALUE and initialize it. The mode of the
2638 new_cselib_val (value
, mode
)
2640 enum machine_mode mode
;
2642 cselib_val
*e
= empty_vals
;
2645 empty_vals
= e
->u
.next_free
;
2647 e
= (cselib_val
*) obstack_alloc (&cselib_obstack
, sizeof (cselib_val
));
2653 e
->u
.val_rtx
= gen_rtx_VALUE (mode
);
2654 CSELIB_VAL_PTR (e
->u
.val_rtx
) = e
;
2660 /* ADDR_ELT is a value that is used as address. MEM_ELT is the value that
2661 contains the data at this address. X is a MEM that represents the
2662 value. Update the two value structures to represent this situation. */
2665 add_mem_for_addr (addr_elt
, mem_elt
, x
)
2666 cselib_val
*addr_elt
, *mem_elt
;
2670 struct elt_loc_list
*l
;
2672 /* Avoid duplicates. */
2673 for (l
= mem_elt
->locs
; l
; l
= l
->next
)
2674 if (GET_CODE (l
->loc
) == MEM
2675 && CSELIB_VAL_PTR (XEXP (l
->loc
, 0)) == addr_elt
)
2678 new = gen_rtx_MEM (GET_MODE (x
), addr_elt
->u
.val_rtx
);
2679 MEM_COPY_ATTRIBUTES (new, x
);
2681 addr_elt
->addr_list
= new_elt_list (addr_elt
->addr_list
, mem_elt
);
2682 mem_elt
->locs
= new_elt_loc_list (mem_elt
->locs
, new);
2685 /* Subroutine of cselib_lookup. Return a value for X, which is a MEM rtx.
2686 If CREATE, make a new one if we haven't seen it before. */
2689 cselib_lookup_mem (x
, create
)
2695 cselib_val
*mem_elt
;
2698 if (MEM_VOLATILE_P (x
) || GET_MODE (x
) == BLKmode
2699 || (FLOAT_MODE_P (GET_MODE (x
)) && flag_float_store
))
2702 /* Look up the value for the address. */
2703 addr
= cselib_lookup (XEXP (x
, 0), GET_MODE (x
), create
);
2707 /* Find a value that describes a value of our mode at that address. */
2708 for (l
= addr
->addr_list
; l
; l
= l
->next
)
2709 if (GET_MODE (l
->elt
->u
.val_rtx
) == GET_MODE (x
))
2715 mem_elt
= new_cselib_val (++next_unknown_value
, GET_MODE (x
));
2716 add_mem_for_addr (addr
, mem_elt
, x
);
2717 slot
= htab_find_slot_with_hash (hash_table
, x
, mem_elt
->value
, INSERT
);
2722 /* Walk rtx X and replace all occurrences of REG and MEM subexpressions
2723 with VALUE expressions. This way, it becomes independent of changes
2724 to registers and memory.
2725 X isn't actually modified; if modifications are needed, new rtl is
2726 allocated. However, the return value can share rtl with X. */
2729 cselib_subst_to_values (x
)
2732 enum rtx_code code
= GET_CODE (x
);
2733 const char *fmt
= GET_RTX_FORMAT (code
);
2742 for (l
= REG_VALUES (REGNO (x
)); l
; l
= l
->next
)
2743 if (GET_MODE (l
->elt
->u
.val_rtx
) == GET_MODE (x
))
2744 return l
->elt
->u
.val_rtx
;
2749 e
= cselib_lookup_mem (x
, 0);
2752 return e
->u
.val_rtx
;
2754 /* CONST_DOUBLEs must be special-cased here so that we won't try to
2755 look up the CONST_DOUBLE_MEM inside. */
2764 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2768 rtx t
= cselib_subst_to_values (XEXP (x
, i
));
2770 if (t
!= XEXP (x
, i
) && x
== copy
)
2771 copy
= shallow_copy_rtx (x
);
2775 else if (fmt
[i
] == 'E')
2779 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2781 rtx t
= cselib_subst_to_values (XVECEXP (x
, i
, j
));
2783 if (t
!= XVECEXP (x
, i
, j
) && XVEC (x
, i
) == XVEC (copy
, i
))
2786 copy
= shallow_copy_rtx (x
);
2788 XVEC (copy
, i
) = rtvec_alloc (XVECLEN (x
, i
));
2789 for (k
= 0; k
< j
; k
++)
2790 XVECEXP (copy
, i
, k
) = XVECEXP (x
, i
, k
);
2793 XVECEXP (copy
, i
, j
) = t
;
2801 /* Look up the rtl expression X in our tables and return the value it has.
2802 If CREATE is zero, we return NULL if we don't know the value. Otherwise,
2803 we create a new one if possible, using mode MODE if X doesn't have a mode
2804 (i.e. because it's a constant). */
2807 cselib_lookup (x
, mode
, create
)
2809 enum machine_mode mode
;
2814 unsigned int hashval
;
2816 if (GET_MODE (x
) != VOIDmode
)
2817 mode
= GET_MODE (x
);
2819 if (GET_CODE (x
) == VALUE
)
2820 return CSELIB_VAL_PTR (x
);
2822 if (GET_CODE (x
) == REG
)
2825 unsigned int i
= REGNO (x
);
2827 for (l
= REG_VALUES (i
); l
; l
= l
->next
)
2828 if (mode
== GET_MODE (l
->elt
->u
.val_rtx
))
2834 e
= new_cselib_val (++next_unknown_value
, GET_MODE (x
));
2835 e
->locs
= new_elt_loc_list (e
->locs
, x
);
2836 REG_VALUES (i
) = new_elt_list (REG_VALUES (i
), e
);
2837 slot
= htab_find_slot_with_hash (hash_table
, x
, e
->value
, INSERT
);
2842 if (GET_CODE (x
) == MEM
)
2843 return cselib_lookup_mem (x
, create
);
2845 hashval
= hash_rtx (x
, mode
, create
);
2846 /* Can't even create if hashing is not possible. */
2850 slot
= htab_find_slot_with_hash (hash_table
, x
, hashval
,
2851 create
? INSERT
: NO_INSERT
);
2855 e
= (cselib_val
*) *slot
;
2859 e
= new_cselib_val (hashval
, mode
);
2861 /* We have to fill the slot before calling cselib_subst_to_values:
2862 the hash table is inconsistent until we do so, and
2863 cselib_subst_to_values will need to do lookups. */
2865 e
->locs
= new_elt_loc_list (e
->locs
, cselib_subst_to_values (x
));
2869 /* Invalidate any entries in reg_values that overlap REGNO. This is called
2870 if REGNO is changing. MODE is the mode of the assignment to REGNO, which
2871 is used to determine how many hard registers are being changed. If MODE
2872 is VOIDmode, then only REGNO is being changed; this is used when
2873 invalidating call clobbered registers across a call. */
2876 cselib_invalidate_regno (regno
, mode
)
2878 enum machine_mode mode
;
2880 unsigned int endregno
;
2883 /* If we see pseudos after reload, something is _wrong_. */
2884 if (reload_completed
&& regno
>= FIRST_PSEUDO_REGISTER
2885 && reg_renumber
[regno
] >= 0)
2888 /* Determine the range of registers that must be invalidated. For
2889 pseudos, only REGNO is affected. For hard regs, we must take MODE
2890 into account, and we must also invalidate lower register numbers
2891 if they contain values that overlap REGNO. */
2892 endregno
= regno
+ 1;
2893 if (regno
< FIRST_PSEUDO_REGISTER
&& mode
!= VOIDmode
)
2894 endregno
= regno
+ HARD_REGNO_NREGS (regno
, mode
);
2896 for (i
= 0; i
< endregno
; i
++)
2898 struct elt_list
**l
= ®_VALUES (i
);
2900 /* Go through all known values for this reg; if it overlaps the range
2901 we're invalidating, remove the value. */
2904 cselib_val
*v
= (*l
)->elt
;
2905 struct elt_loc_list
**p
;
2906 unsigned int this_last
= i
;
2908 if (i
< FIRST_PSEUDO_REGISTER
)
2909 this_last
+= HARD_REGNO_NREGS (i
, GET_MODE (v
->u
.val_rtx
)) - 1;
2911 if (this_last
< regno
)
2917 /* We have an overlap. */
2918 unchain_one_elt_list (l
);
2920 /* Now, we clear the mapping from value to reg. It must exist, so
2921 this code will crash intentionally if it doesn't. */
2922 for (p
= &v
->locs
; ; p
= &(*p
)->next
)
2926 if (GET_CODE (x
) == REG
&& REGNO (x
) == i
)
2928 unchain_one_elt_loc_list (p
);
2938 /* The memory at address MEM_BASE is being changed.
2939 Return whether this change will invalidate VAL. */
2942 cselib_mem_conflict_p (mem_base
, val
)
2950 code
= GET_CODE (val
);
2953 /* Get rid of a few simple cases quickly. */
2966 if (GET_MODE (mem_base
) == BLKmode
2967 || GET_MODE (val
) == BLKmode
2968 || anti_dependence (val
, mem_base
))
2971 /* The address may contain nested MEMs. */
2978 fmt
= GET_RTX_FORMAT (code
);
2979 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2983 if (cselib_mem_conflict_p (mem_base
, XEXP (val
, i
)))
2986 else if (fmt
[i
] == 'E')
2987 for (j
= 0; j
< XVECLEN (val
, i
); j
++)
2988 if (cselib_mem_conflict_p (mem_base
, XVECEXP (val
, i
, j
)))
2995 /* For the value found in SLOT, walk its locations to determine if any overlap
2996 INFO (which is a MEM rtx). */
2999 cselib_invalidate_mem_1 (slot
, info
)
3003 cselib_val
*v
= (cselib_val
*) *slot
;
3004 rtx mem_rtx
= (rtx
) info
;
3005 struct elt_loc_list
**p
= &v
->locs
;
3006 int had_locs
= v
->locs
!= 0;
3012 struct elt_list
**mem_chain
;
3014 /* MEMs may occur in locations only at the top level; below
3015 that every MEM or REG is substituted by its VALUE. */
3016 if (GET_CODE (x
) != MEM
3017 || ! cselib_mem_conflict_p (mem_rtx
, x
))
3023 /* This one overlaps. */
3024 /* We must have a mapping from this MEM's address to the
3025 value (E). Remove that, too. */
3026 addr
= cselib_lookup (XEXP (x
, 0), VOIDmode
, 0);
3027 mem_chain
= &addr
->addr_list
;
3030 if ((*mem_chain
)->elt
== v
)
3032 unchain_one_elt_list (mem_chain
);
3036 mem_chain
= &(*mem_chain
)->next
;
3039 unchain_one_elt_loc_list (p
);
3042 if (had_locs
&& v
->locs
== 0)
3048 /* Invalidate any locations in the table which are changed because of a
3049 store to MEM_RTX. If this is called because of a non-const call
3050 instruction, MEM_RTX is (mem:BLK const0_rtx). */
3053 cselib_invalidate_mem (mem_rtx
)
3056 htab_traverse (hash_table
, cselib_invalidate_mem_1
, mem_rtx
);
3059 /* Invalidate DEST, which is being assigned to or clobbered. The second and
3060 the third parameter exist so that this function can be passed to
3061 note_stores; they are ignored. */
3064 cselib_invalidate_rtx (dest
, ignore
, data
)
3066 rtx ignore ATTRIBUTE_UNUSED
;
3067 void *data ATTRIBUTE_UNUSED
;
3069 while (GET_CODE (dest
) == STRICT_LOW_PART
|| GET_CODE (dest
) == SIGN_EXTRACT
3070 || GET_CODE (dest
) == ZERO_EXTRACT
|| GET_CODE (dest
) == SUBREG
)
3071 dest
= XEXP (dest
, 0);
3073 if (GET_CODE (dest
) == REG
)
3074 cselib_invalidate_regno (REGNO (dest
), GET_MODE (dest
));
3075 else if (GET_CODE (dest
) == MEM
)
3076 cselib_invalidate_mem (dest
);
3078 /* Some machines don't define AUTO_INC_DEC, but they still use push
3079 instructions. We need to catch that case here in order to
3080 invalidate the stack pointer correctly. Note that invalidating
3081 the stack pointer is different from invalidating DEST. */
3082 if (push_operand (dest
, GET_MODE (dest
)))
3083 cselib_invalidate_rtx (stack_pointer_rtx
, NULL_RTX
, NULL
);
3086 /* Record the result of a SET instruction. DEST is being set; the source
3087 contains the value described by SRC_ELT. If DEST is a MEM, DEST_ADDR_ELT
3088 describes its address. */
3091 cselib_record_set (dest
, src_elt
, dest_addr_elt
)
3093 cselib_val
*src_elt
, *dest_addr_elt
;
3095 int dreg
= GET_CODE (dest
) == REG
? (int) REGNO (dest
) : -1;
3097 if (src_elt
== 0 || side_effects_p (dest
))
3102 REG_VALUES (dreg
) = new_elt_list (REG_VALUES (dreg
), src_elt
);
3103 if (src_elt
->locs
== 0)
3105 src_elt
->locs
= new_elt_loc_list (src_elt
->locs
, dest
);
3107 else if (GET_CODE (dest
) == MEM
&& dest_addr_elt
!= 0)
3109 if (src_elt
->locs
== 0)
3111 add_mem_for_addr (dest_addr_elt
, src_elt
, dest
);
3115 /* Describe a single set that is part of an insn. */
3120 cselib_val
*src_elt
;
3121 cselib_val
*dest_addr_elt
;
3124 /* There is no good way to determine how many elements there can be
3125 in a PARALLEL. Since it's fairly cheap, use a really large number. */
3126 #define MAX_SETS (FIRST_PSEUDO_REGISTER * 2)
3128 /* Record the effects of any sets in INSN. */
3130 cselib_record_sets (insn
)
3135 struct set sets
[MAX_SETS
];
3136 rtx body
= PATTERN (insn
);
3138 body
= PATTERN (insn
);
3139 /* Find all sets. */
3140 if (GET_CODE (body
) == SET
)
3142 sets
[0].src
= SET_SRC (body
);
3143 sets
[0].dest
= SET_DEST (body
);
3146 else if (GET_CODE (body
) == PARALLEL
)
3148 /* Look through the PARALLEL and record the values being
3149 set, if possible. Also handle any CLOBBERs. */
3150 for (i
= XVECLEN (body
, 0) - 1; i
>= 0; --i
)
3152 rtx x
= XVECEXP (body
, 0, i
);
3154 if (GET_CODE (x
) == SET
)
3156 sets
[n_sets
].src
= SET_SRC (x
);
3157 sets
[n_sets
].dest
= SET_DEST (x
);
3163 /* Look up the values that are read. Do this before invalidating the
3164 locations that are written. */
3165 for (i
= 0; i
< n_sets
; i
++)
3167 sets
[i
].src_elt
= cselib_lookup (sets
[i
].src
, GET_MODE (sets
[i
].dest
),
3169 if (GET_CODE (sets
[i
].dest
) == MEM
)
3170 sets
[i
].dest_addr_elt
= cselib_lookup (XEXP (sets
[i
].dest
, 0), Pmode
,
3173 sets
[i
].dest_addr_elt
= 0;
3176 /* Invalidate all locations written by this insn. Note that the elts we
3177 looked up in the previous loop aren't affected, just some of their
3178 locations may go away. */
3179 note_stores (body
, cselib_invalidate_rtx
, NULL
);
3181 /* Now enter the equivalences in our tables. */
3182 for (i
= 0; i
< n_sets
; i
++)
3183 cselib_record_set (sets
[i
].dest
, sets
[i
].src_elt
, sets
[i
].dest_addr_elt
);
3186 /* Record the effects of INSN. */
3189 cselib_process_insn (insn
)
3195 cselib_current_insn
= insn
;
3197 /* Forget everything at a CODE_LABEL, a volatile asm, or a setjmp. */
3198 if (GET_CODE (insn
) == CODE_LABEL
3199 || (GET_CODE (insn
) == NOTE
3200 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_SETJMP
)
3201 || (GET_CODE (insn
) == INSN
3202 && GET_CODE (PATTERN (insn
)) == ASM_OPERANDS
3203 && MEM_VOLATILE_P (PATTERN (insn
))))
3209 if (! INSN_P (insn
))
3211 cselib_current_insn
= 0;
3215 /* If this is a call instruction, forget anything stored in a
3216 call clobbered register, or, if this is not a const call, in
3218 if (GET_CODE (insn
) == CALL_INSN
)
3220 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
3221 if (call_used_regs
[i
])
3222 cselib_invalidate_regno (i
, VOIDmode
);
3224 if (! CONST_CALL_P (insn
))
3225 cselib_invalidate_mem (callmem
);
3228 cselib_record_sets (insn
);
3231 /* Clobber any registers which appear in REG_INC notes. We
3232 could keep track of the changes to their values, but it is
3233 unlikely to help. */
3234 for (x
= REG_NOTES (insn
); x
; x
= XEXP (x
, 1))
3235 if (REG_NOTE_KIND (x
) == REG_INC
)
3236 cselib_invalidate_rtx (XEXP (x
, 0), NULL_RTX
, NULL
);
3239 /* Look for any CLOBBERs in CALL_INSN_FUNCTION_USAGE, but only
3240 after we have processed the insn. */
3241 if (GET_CODE (insn
) == CALL_INSN
)
3242 for (x
= CALL_INSN_FUNCTION_USAGE (insn
); x
; x
= XEXP (x
, 1))
3243 if (GET_CODE (XEXP (x
, 0)) == CLOBBER
)
3244 cselib_invalidate_rtx (XEXP (XEXP (x
, 0), 0), NULL_RTX
, NULL
);
3246 cselib_current_insn
= 0;
3248 if (n_useless_values
> MAX_USELESS_VALUES
)
3249 remove_useless_values ();
3252 /* Make sure our varrays are big enough. Not called from any cselib routines;
3253 it must be called by the user if it allocated new registers. */
3256 cselib_update_varray_sizes ()
3258 unsigned int nregs
= max_reg_num ();
3260 if (nregs
== cselib_nregs
)
3263 cselib_nregs
= nregs
;
3264 VARRAY_GROW (reg_values
, nregs
);
3267 /* Initialize cselib for one pass. The caller must also call
3268 init_alias_analysis. */
3273 /* These are only created once. */
3276 gcc_obstack_init (&cselib_obstack
);
3277 cselib_startobj
= obstack_alloc (&cselib_obstack
, 0);
3279 callmem
= gen_rtx_MEM (BLKmode
, const0_rtx
);
3280 ggc_add_rtx_root (&callmem
, 1);
3283 cselib_nregs
= max_reg_num ();
3284 VARRAY_ELT_LIST_INIT (reg_values
, cselib_nregs
, "reg_values");
3285 hash_table
= htab_create (31, get_value_hash
, entry_and_rtx_equal_p
, NULL
);
3289 /* Called when the current user is done with cselib. */
3295 htab_delete (hash_table
);