/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "insn-config.h"
#include "recog.h"
#include "target.h"
#include "regs.h"
#include "function.h"
#include "df.h"
#include "basic-block.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
#include "rtl-iter.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }

  unsigned int length = array.heap->length ();
  gcc_checking_assert (base == array.heap->address ());
  if (length > i)
    {
      base[i] = x;
      return base;
    }
  gcc_checking_assert (i == length);
  vec_safe_push (array.heap, x);
  return array.heap->address ();
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         hack below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
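/* Usage sketch (illustrative, not part of the original file): these
   instantiations back the FOR_EACH_SUBRTX family of macros from
   rtl-iter.h, used later in this file (see find_all_hard_regs):

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, NONCONST)
       if (REG_P (*iter))
         ... visit each register mentioned in X ...

   where X is any const_rtx supplied by the caller.  */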
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return false;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx)
        {
          HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (FRAME_GROWS_DOWNWARD)
            {
              if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
                return 1;
            }
          else
            {
              if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
                return 1;
            }
          return 0;
        }
      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
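/* For example (illustrative only): given the rtx
     (const (plus (symbol_ref "x") (const_int 8)))
   get_integer_term returns 8 and get_related_value returns the inner
   (symbol_ref "x"); on expressions with no obvious integer term both
   return 0.  */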
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
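/* Usage sketch (illustrative; ADDR is a hypothetical caller-supplied rtx):

     rtx base, offset;
     split_const (addr, &base, &offset);

   OFFSET is always a CONST_INT afterwards (const0_rtx when X has no
   apparent constant term), so INTVAL (offset) is safe to use.  */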
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
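/* Usage sketch (illustrative): testing whether INSN overwrites REG and
   retrieving the responsible expression:

     const_rtx s = set_of (reg, insn);
     if (s != NULL_RTX && GET_CODE (s) == SET)
       ... SET_DEST (s) overlaps REG ...
*/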
/* Add all hard registers in X to *PSET.  */
static void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SET whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL;
              break;

            default:
              return NULL;
            }
        }
    }
  return set;
}
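/* Usage sketch (illustrative): most callers reach this function through
   the single_set wrapper declared in rtl.h:

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       ... the insn's only live effect is storing SET_SRC (set) ...
*/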
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
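/* For example (illustrative): (set (reg:SI 5) (reg:SI 5)) is a no-op by
   the tests above, while (set (reg:SI 5) (reg:SI 6)) and any set whose
   source has side effects are not.  */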
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
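/* Usage sketch (illustrative): noop_move_p earlier in this file calls
   find_reg_note (insn, REG_EQUAL, NULL_RTX) to detect any REG_EQUAL
   note; passing a nonnull DATUM instead requires the note's datum to
   be pointer-equal to DATUM.  */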
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;

    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no instructions
   or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
2391 side_effects_p (const_rtx x
)
2393 const RTX_CODE code
= GET_CODE (x
);
2410 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2411 when some combination can't be done. If we see one, don't think
2412 that we can simplify the expression. */
2413 return (GET_MODE (x
) != VOIDmode
);
2422 case UNSPEC_VOLATILE
:
2428 if (MEM_VOLATILE_P (x
))
2435 /* Recursively scan the operands of this expression. */
2438 const char *fmt
= GET_RTX_FORMAT (code
);
2441 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2445 if (side_effects_p (XEXP (x
, i
)))
2448 else if (fmt
[i
] == 'E')
2451 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2452 if (side_effects_p (XVECEXP (x
, i
, j
)))
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_OPERANDS:
    case TRAP_IF:
      return 1;

    case ASM_INPUT:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
          || HONOR_NANS (XEXP (x, 1)))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
          || HONOR_SNANS (XEXP (x, 1)))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
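/* Usage sketch (illustrative): a pass deciding whether an expression is
   safe to evaluate speculatively might combine these predicates:

     if (!may_trap_p (x) && !side_effects_p (x))
       ... X can be computed unconditionally ...
*/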
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      if ((reg:SI) == 1)
        (set (reg:SI) (mem:SI (%fp - 7)))
      else
        (set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
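/* Illustrative usage sketch (the pattern and pseudo below are
   hypothetical): to substitute a known constant for a pseudo throughout
   a pattern,

       pat = replace_rtx (pat, pseudo_reg, GEN_INT (4));

   The SUBREG and ZERO_EXTEND cases above ensure that a replacement which
   exposes a constant operand is folded on the spot by simplify_subreg or
   simplify_unary_operation rather than left as an ill-formed
   (subreg (const_int ...)).  */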
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant
		     pool are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}

void
replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
		       bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && LABEL_REF_LABEL (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table)
   to *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label;
  rtx_insn *table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
	*labelp = label;
      if (tablep)
	*tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }
  return false;
}
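/* Illustrative usage (INSN below is a hypothetical jump insn):

       rtx label;
       rtx_jump_table_data *table;
       if (tablejump_p (insn, &label, &table))
	 ...   LABEL is the code_label that precedes TABLE in the stream.

   Passing NULL for either out-parameter simply skips that result.  */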
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
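/* Example (illustrative): a jump whose pattern is

       (set (pc) (reg:DI 100))

   is a computed jump, whereas a tablejump or casesi insn carries a
   (use (label_ref ...)) in its PARALLEL and is therefore rejected by
   the has_use_labelref check above.  */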
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
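/* Example (illustrative): for a 4-byte access through a post-decrement
   address,

       (mem:SI (post_dec:SI (reg:SI 100)))

   FN receives the MEM, the POST_DEC rtx itself, the register both as the
   modified rtx and as the first addend, and (const_int -4) as the second
   addend: the operands of the equivalent add r100 = r100 + (-4).  */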
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand;
   negative values indicate that the operand is better placed second.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_WIDE_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -6;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
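/* Example (illustrative): in (plus (const_int 4) (reg:SI 100)) the
   constant gets the most negative precedence and the REG an object
   precedence, so swap_commutative_operands_p returns nonzero and the
   canonical form is (plus (reg:SI 100) (const_int 4)).  */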
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
      break;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
	      machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
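/* Worked example (illustrative): for (subreg:SI (reg:DI R) 4) on a
   little-endian target, subreg_lsb returns 32 (byte 4 times
   BITS_PER_UNIT); on a fully big-endian target the same subreg names
   the least significant word, so subreg_lsb returns 0.  */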
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
   register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 unsigned int offset, machine_mode ymode,
		 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
	xmode_unit = xmode;
      else
	xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (GET_MODE_NUNITS (xmode)
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
		  == (hard_regno_nregs[xregno][xmode_unit]
		      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
	   < GET_MODE_NUNITS (xmode))
	  && (offset / GET_MODE_SIZE (xmode_unit)
	      != ((offset + GET_MODE_SIZE (ymode) - 1)
		  / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
	  ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
	info->offset = nregs_xmode - nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
				       mode_for_size (GET_MODE_BITSIZE (xmode)
						      / nregs_xmode,
						      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
	= (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
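/* Worked example (illustrative): if DImode occupies two SImode hard
   registers, then for (subreg:SI (reg:DI R) 4) this returns 1: hard
   registers follow memory order, so byte offset 4 selects register
   R + 1, while byte offset 0 returns 0.  */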
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */

static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */

rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller must either ensure that we will find all sets
	 (in case code has not been optimized yet), or take care of
	 possible labels by setting BOUNDARY to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
	 the mode for the factor.  */
      factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      /* Fall through.  */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}
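/* Worked example (illustrative): on a target with UNITS_PER_WORD == 4,
   a TImode (16-byte) MULT gets factor = 4, so the default cost above is
   4 * 4 * COSTS_N_INSNS (5), the O(N*N) schoolbook estimate, unless
   targetm.rtx_costs overrides it.  */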
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
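/* Example (illustrative): for X = (and:SI (reg:SI 100) (const_int 255)),
   nonzero_bits (X, SImode) returns 0xff: the AND case below intersects
   the nonzero bits of both operands, and the CONST_INT case contributes
   exactly the constant's value.  */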
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   an arithmetic operation, we can do better.  */
static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
	       machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && REG_POINTER (x))
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
			     alignment);
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
						  known_mode, known_ret,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	     != 0)
	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show all the bits in the outer mode but not the inner
	 may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = floor_log2 (nz0 & -nz0);
	int low1 = floor_log2 (nz1 & -nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    result_width = width0;
	    break;
	  case MOD:
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
	nonzero = GET_MODE_MASK (GET_MODE (x))
		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
					 known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
	  && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
	  /* If this is a typical RISC machine, we only have to worry
	     about the way loads are extended.  */
	  if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
	       ? val_signbit_known_set_p (inner_mode, nonzero)
	       : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
	      || !MEM_P (SUBREG_REG (x)))
#endif
	    {
	      /* On many CISC machines, accessing an object in a wider mode
		 causes the high-order bits to become undefined.  So they are
		 not known to be zero.  */
	      if (GET_MODE_PRECISION (GET_MODE (x))
		  > GET_MODE_PRECISION (inner_mode))
		nonzero |= (GET_MODE_MASK (GET_MODE (x))
			    & ~GET_MODE_MASK (inner_mode));
	    }
	}
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in GET_MODE (x) are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	{
	  machine_mode inner_mode = GET_MODE (x);
	  unsigned int width = GET_MODE_PRECISION (inner_mode);
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  if (code == LSHIFTRT)
	    inner >>= count;
	  else if (code == ASHIFTRT)
	    {
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
		inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
			 << (width - count);
	    }
	  else if (code == ASHIFT)
	    inner <<= count;
	  else
	    inner = ((inner << (count % width)
		      | (inner >> (width - (count % width)))) & mode_mask);

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
			    machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (ARITHMETIC_P (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
4487 /* Return the number of bits at the high-order end of X that are known to
4488 be equal to the sign bit. X will be used in mode MODE; if MODE is
4489 VOIDmode, X will be used in its own mode. The returned value will always
4490 be between 1 and the number of bits in MODE. */
4493 num_sign_bit_copies1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4494 machine_mode known_mode
,
4495 unsigned int known_ret
)
4497 enum rtx_code code
= GET_CODE (x
);
4498 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4499 int num0
, num1
, result
;
4500 unsigned HOST_WIDE_INT nonzero
;
4502 /* If we weren't given a mode, use the mode of X. If the mode is still
4503 VOIDmode, we don't know anything. Likewise if one of the modes is
4506 if (mode
== VOIDmode
)
4507 mode
= GET_MODE (x
);
4509 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4510 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4513 /* For a smaller object, just ignore the high bits. */
4514 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4516 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4517 known_x
, known_mode
, known_ret
);
4519 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4522 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4524 #ifndef WORD_REGISTER_OPERATIONS
4525 /* If this machine does not do all register operations on the entire
4526 register and MODE is wider than the mode of X, we can say nothing
4527 at all about the high-order bits. */
4530 /* Likewise on machines that do, if the mode of the object is smaller
4531 than a word and loads of that size don't sign extend, we can say
4532 nothing about the high order bits. */
4533 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4534 #ifdef LOAD_EXTEND_OP
4535 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4546 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4547 /* If pointers extend signed and this is a pointer in Pmode, say that
4548 all the bits above ptr_mode are known to be sign bit copies. */
4549 /* As we do not know which address space the pointer is referring to,
4550 we can do this only if the target does not support different pointer
4551 or address modes depending on the address space. */
4552 if (target_default_pointer_address_modes_p ()
4553 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4554 && mode
== Pmode
&& REG_POINTER (x
))
4555 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4559 unsigned int copies_for_hook
= 1, copies
= 1;
4560 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4561 known_mode
, known_ret
,
4565 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4566 known_mode
, known_ret
);
4568 if (copies
> 1 || copies_for_hook
> 1)
4569 return MAX (copies
, copies_for_hook
);
4571 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4576 #ifdef LOAD_EXTEND_OP
4577 /* Some RISC machines sign-extend all loads of smaller than a word. */
4578 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4579 return MAX (1, ((int) bitwidth
4580 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1));
4585 /* If the constant is negative, take its 1's complement and remask.
4586 Then see how many zero bits we have. */
4587 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4588 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4589 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4590 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4592 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4595 /* If this is a SUBREG for a promoted object that is sign-extended
4596 and we are looking at it in a wider mode, we know that at least the
4597 high-order bits are known to be sign bit copies. */
4599 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_SIGNED_P (x
))
4601 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4602 known_x
, known_mode
, known_ret
);
4603 return MAX ((int) bitwidth
4604 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1,
4608 /* For a smaller object, just ignore the high bits. */
4609 if (bitwidth
<= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
))))
4611 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), VOIDmode
,
4612 known_x
, known_mode
, known_ret
);
4613 return MAX (1, (num0
4614 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
)))
4618 #ifdef WORD_REGISTER_OPERATIONS
4619 #ifdef LOAD_EXTEND_OP
4620 /* For paradoxical SUBREGs on machines where all register operations
4621 affect the entire register, just look inside. Note that we are
4622 passing MODE to the recursive call, so the number of sign bit copies
4623 will remain relative to that mode, not the inner mode. */
4625 /* This works only if loads sign extend. Otherwise, if we get a
4626 reload for the inner part, it may be loaded from the stack, and
4627 then we lose all sign bit copies that existed before the store
4630 if (paradoxical_subreg_p (x
)
4631 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
4632 && MEM_P (SUBREG_REG (x
)))
4633 return cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4634 known_x
, known_mode
, known_ret
);
4640 if (CONST_INT_P (XEXP (x
, 1)))
4641 return MAX (1, (int) bitwidth
- INTVAL (XEXP (x
, 1)));
4645 return (bitwidth
- GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4646 + cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4647 known_x
, known_mode
, known_ret
));
4650 /* For a smaller object, just ignore the high bits. */
4651 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4652 known_x
, known_mode
, known_ret
);
4653 return MAX (1, (num0
- (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4657 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4658 known_x
, known_mode
, known_ret
);
4660 case ROTATE
: case ROTATERT
:
4661 /* If we are rotating left by a number of bits less than the number
4662 of sign bit copies, we can just subtract that amount from the
4664 if (CONST_INT_P (XEXP (x
, 1))
4665 && INTVAL (XEXP (x
, 1)) >= 0
4666 && INTVAL (XEXP (x
, 1)) < (int) bitwidth
)
4668 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4669 known_x
, known_mode
, known_ret
);
4670 return MAX (1, num0
- (code
== ROTATE
? INTVAL (XEXP (x
, 1))
4671 : (int) bitwidth
- INTVAL (XEXP (x
, 1))));
4676 /* In general, this subtracts one sign bit copy. But if the value
4677 is known to be positive, the number of sign bit copies is the
4678 same as that of the input. Finally, if the input has just one bit
4679 that might be nonzero, all the bits are copies of the sign bit. */
4680 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4681 known_x
, known_mode
, known_ret
);
4682 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4683 return num0
> 1 ? num0
- 1 : 1;
4685 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4690 && (((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
))
4695 case IOR
: case AND
: case XOR
:
4696 case SMIN
: case SMAX
: case UMIN
: case UMAX
:
4697 /* Logical operations will preserve the number of sign-bit copies.
4698 MIN and MAX operations always return one of the operands. */
4699 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4700 known_x
, known_mode
, known_ret
);
4701 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4702 known_x
, known_mode
, known_ret
);
4704 /* If num1 is clearing some of the top bits then regardless of
4705 the other term, we are guaranteed to have at least that many
4706 high-order zero bits. */
4709 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4710 && CONST_INT_P (XEXP (x
, 1))
4711 && (UINTVAL (XEXP (x
, 1))
4712 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) == 0)
4715 /* Similarly for IOR when setting high-order bits. */
4718 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4719 && CONST_INT_P (XEXP (x
, 1))
4720 && (UINTVAL (XEXP (x
, 1))
4721 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4724 return MIN (num0
, num1
);
4726 case PLUS
: case MINUS
:
4727 /* For addition and subtraction, we can have a 1-bit carry. However,
4728 if we are subtracting 1 from a positive number, there will not
4729 be such a carry. Furthermore, if the positive number is known to
4730 be 0 or 1, we know the result is either -1 or 0. */
4732 if (code
== PLUS
&& XEXP (x
, 1) == constm1_rtx
4733 && bitwidth
<= HOST_BITS_PER_WIDE_INT
)
4735 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4736 if ((((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
) == 0)
4737 return (nonzero
== 1 || nonzero
== 0 ? bitwidth
4738 : bitwidth
- floor_log2 (nonzero
) - 1);
4741 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4742 known_x
, known_mode
, known_ret
);
4743 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4744 known_x
, known_mode
, known_ret
);
4745 result
= MAX (1, MIN (num0
, num1
) - 1);
4750 /* The number of bits of the product is the sum of the number of
4751 bits of both terms. However, unless one of the terms if known
4752 to be positive, we must allow for an additional bit since negating
4753 a negative number can remove one sign bit copy. */
4755 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4756 known_x
, known_mode
, known_ret
);
4757 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4758 known_x
, known_mode
, known_ret
);
4760 result
= bitwidth
- (bitwidth
- num0
) - (bitwidth
- num1
);
4762 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4763 || (((nonzero_bits (XEXP (x
, 0), mode
)
4764 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4765 && ((nonzero_bits (XEXP (x
, 1), mode
)
4766 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)))
4770 return MAX (1, result
);
4773 /* The result must be <= the first operand. If the first operand
4774 has the high bit set, we know nothing about the number of sign
4776 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4778 else if ((nonzero_bits (XEXP (x
, 0), mode
)
4779 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4782 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4783 known_x
, known_mode
, known_ret
);
4786 /* The result must be <= the second operand. If the second operand
4787 has (or just might have) the high bit set, we know nothing about
4788 the number of sign bit copies. */
4789 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4791 else if ((nonzero_bits (XEXP (x
, 1), mode
)
4792 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4795 return cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4796 known_x
, known_mode
, known_ret
);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;
    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
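/* An illustrative example of the fallback above (not a new rule): if X
   is known to have nonzero_bits of 3 in a 32-bit MODE, i.e. only the two
   low bits can be set, then the sign bit is clear and the function
   reports 32 - floor_log2 (3) - 1 == 30 sign-bit copies.  */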
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
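/* Illustrative use (a sketch, not a required idiom): callers typically
   pass the PATTERN of an insn together with a speed/size choice, e.g.

     int cost = insn_rtx_cost (PATTERN (insn),
			       optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn)));

   and treat a zero return as "cost unknown" rather than "free".  */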
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
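/* Illustrative use (a sketch; expand_candidate and BUDGET are
   hypothetical stand-ins for whatever generates and limits the
   sequence):

     start_sequence ();
     expand_candidate ();
     rtx_insn *seq = get_insns ();
     end_sequence ();
     if (seq_cost (seq, optimize_insn_for_speed_p ()) <= BUDGET)
       emit_insn (seq);

   Insns without a single set are charged a flat cost of 1 above, so the
   estimate is deliberately rough.  */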
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}
      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != ((unsigned HOST_WIDE_INT) 1
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
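/* Example of rule (4) above: (le (reg:SI 100) (const_int 4)) is
   canonicalized to (lt (reg:SI 100) (const_int 5)), and
   (geu (reg:SI 100) (const_int 1)) to (gtu (reg:SI 100) (const_int 0)),
   provided the adjusted constant still fits the mode.  */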
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
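/* For example, a conditional branch whose pattern is

     (set (pc) (if_then_else (lt (reg:SI 100) (const_int 10))
			     (label_ref 23) (pc)))

   yields (lt (reg:SI 100) (const_int 10)); had the label_ref and (pc)
   arms been swapped, the condition would first be reversed to GE.  */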
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
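/* For instance, on a hypothetical MIPS64-like target whose
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, the loop
   above would record GET_MODE_PRECISION (DImode)
   - GET_MODE_PRECISION (SImode) == 32 in
   num_sign_bit_copies_in_rep[DImode][SImode].  */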
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;
  else
    return false;
}
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
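/* Examples: low_bitmask_len (SImode, 0xff) is 8, since 0xff + 1 is a
   power of 2, while low_bitmask_len (SImode, 0xf0) is -1, since 0xf1
   is not.  */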
/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
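/* Worked example (assuming a 64-bit HOST_WIDE_INT, BITS_PER_WORD == 32
   and little-endian word order): splitting
   (const_int 0x123456789abcdef0) stores the sign-extension of
   0x9abcdef0 in *FIRST and 0x12345678 in *SECOND.  */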
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside and is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
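/* For example, given the aligning address

     (and:DI (plus:DI (reg:DI 100) (reg:DI 101)) (const_int -16))

   strip_address_mutations returns a pointer to the PLUS and, if
   OUTER_CODE is nonnull, stores AND in it.  */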
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
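/* For example, for the stack-push style address

     (pre_modify (reg:SI sp) (plus:SI (reg:SI sp) (const_int -8)))

   the base and base_term point at the first SP, base_term2 at the SP
   inside the PLUS, and the constant step becomes the displacement.  */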
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
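/* So a register marked REG_POINTER outranks (return value 2) a hard
   register that merely happens to be valid only as a base (1) or only
   as an index (-1); anything else is a toss-up (0).  */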
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
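/* For example, the address

     (plus:SI (mult:SI (reg:SI 100) (const_int 4))
	      (plus:SI (reg:SI 101) (const_int 16)))

   is flattened into three operands: the MULT can only be an index, the
   bare register is then taken as the base, and the constant becomes the
   displacement.  */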
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
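/* Examples: an index of (mult (reg:SI 100) (const_int 4)) or
   (ashift (reg:SI 100) (const_int 2)) has scale 4; a bare register has
   scale 1; anything more complicated yields 0.  */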
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}