/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);
rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
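/* Illustrative example (not part of the original source): on a
   hypothetical target where TARGET_MODE_REP_EXTENDED (QImode, SImode)
   is SIGN_EXTEND, QImode values are kept sign-extended to SImode.
   With a 32-bit SImode and an 8-bit QImode, truncating SImode to
   QImode is a pure mode switch only when the 25 high-order bits of
   the source are sign-bit copies, i.e. when
   num_sign_bit_copies_in_rep[SImode][QImode] == 25 is satisfied by
   the value being truncated.  */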
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }
      gcc_checking_assert (i == LOCAL_ELEMS);
      vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
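/* Usage sketch (illustrative, not part of the original source): these
   iterators are normally reached through the FOR_EACH_SUBRTX* macros,
   as find_all_hard_regs below does:

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, NONCONST)
       if (REG_P (*iter))
         ...

   The ARRAY supplies the initial stack-allocated worklist, which the
   code above spills to a heap vector only when it overflows.  */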
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return false;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
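/* Illustrative examples (not part of the original source): by these
   rules (reg:SI fp) and (symbol_ref:SI "x") do not vary, whereas
   (mem:SI (reg:SI 100)) does, both because the pseudo register may
   change and because the memory is not known to be read-only.  */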
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx)
        {
          HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (FRAME_GROWS_DOWNWARD)
            {
              if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
                return 1;
            }
          else
            {
              if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
                return 1;
            }
          return 0;
        }
      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the case above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
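/* Usage sketch (illustrative, not part of the original source): given
   a MEM rtx, a pass can ask whether the dereference might trap:

     if (MEM_P (x) && rtx_addr_can_trap_p (XEXP (x, 0)))
       ...  treat X as potentially trapping ...

   may_trap_p_1 below uses the _1 variant directly so that it can pass
   the known access size and mode.  */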
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the case above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
612 get_call_rtx_from (rtx x
)
616 if (GET_CODE (x
) == PARALLEL
)
617 x
= XVECEXP (x
, 0, 0);
618 if (GET_CODE (x
) == SET
)
620 if (GET_CODE (x
) == CALL
&& MEM_P (XEXP (x
, 0)))
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
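/* Illustrative example (not part of the original source): for
   (const:SI (plus:SI (symbol_ref:SI ("x")) (const_int 12))),
   get_integer_term returns 12, and get_related_value below returns
   the (symbol_ref:SI ("x")) term.  */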
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
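/* Usage sketch (illustrative, not part of the original source):

     rtx base, offset;
     split_const (x, &base, &offset);

   For (const (plus (symbol_ref "x") (const_int 8))), BASE is the
   SYMBOL_REF and OFFSET is (const_int 8); for any other X, BASE is X
   itself and OFFSET is const0_rtx.  */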
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
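/* Usage sketch (illustrative, not part of the original source): to
   test whether an insn mentions a given pseudo anywhere:

     if (reg_mentioned_p (my_reg, PATTERN (insn)))
       ...

   Note that REGs are matched by register number, unlike
   reg_overlap_mentioned_p below, which also accounts for hard
   register overlap.  */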
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
          return 1;

      return 0;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
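/* Usage sketch (illustrative, not part of the original source):
   together with reg_used_between_p, this answers simple code-motion
   questions, e.g. whether REG is neither set nor used between insns
   A and B:

     if (!reg_set_between_p (reg, a, b)
         && !reg_used_between_p (reg, a, b))
       ...  REG has the same value at B as at A ...  */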
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
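/* Usage sketch (illustrative, not part of the original source):

     const_rtx set_or_clobber = set_of (reg, insn);
     if (set_or_clobber != NULL_RTX)
       ...  INSN writes REG via this SET or CLOBBER ...  */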
/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common case
                 only a single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    return NULL;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL;
              break;

            default:
              return NULL;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
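/* Usage sketch (illustrative, not part of the original source): most
   callers reach single_set_2 through the single_set wrapper:

     rtx set = single_set (insn);
     if (set)
       {
         rtx src = SET_SRC (set);
         rtx dest = SET_DEST (set);
         ...
       }

   which yields NULL for insns with more than one live SET.  */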
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
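/* Illustrative examples (not part of the original source):
   (set (reg:SI 1) (reg:SI 1)) is a no-op, while
   (set (reg:SI 1) (reg:SI 2)) is not; nor is a self-copy through a
   volatile MEM, which side_effects_p rejects.  */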
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
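/* Usage sketch (illustrative, not part of the original source): a
   typical caller passes a callback plus an accumulator, as
   find_all_hard_reg_sets above does:

     static void
     count_stores (rtx x ATTRIBUTE_UNUSED, const_rtx pat ATTRIBUTE_UNUSED,
                   void *data)
     {
       ++*(int *) data;
     }

     int n = 0;
     note_stores (PATTERN (insn), count_stores, &n);  */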
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
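/* Usage sketch (illustrative, not part of the original source): to
   check whether INSN's result is known to equal some constant:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note && CONSTANT_P (XEXP (note, 0)))
       ...  XEXP (note, 0) is the known value ...  */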
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
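/* Usage sketch (illustrative, not part of the original source): to
   record that INSN's destination equals a constant:

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));

   Integer-valued notes such as REG_BR_PROB must instead go through
   add_int_reg_note below.  */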
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

bool
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return true;

  return false;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

static int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
          || HONOR_NANS (XEXP (x, 1)))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
          || HONOR_SNANS (XEXP (x, 1)))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
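/* Usage sketch (illustrative, not part of the original source): code
   motion passes use this to avoid hoisting a trapping expression out
   of its guarding condition:

     rtx set = single_set (insn);
     if (set && !may_trap_p (SET_SRC (set)))
       ...  safe to evaluate SET_SRC speculatively ...  */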
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       ...
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}
2737 /* Replace any occurrence of FROM in X with TO. The function does
2738 not enter into CONST_DOUBLE for the replace.
2740 Note that copying is not done so X must not be shared unless all copies
2741 are to be modified. */
2744 replace_rtx (rtx x
, rtx from
, rtx to
)
2752 /* Allow this function to make replacements in EXPR_LISTs. */
2756 if (GET_CODE (x
) == SUBREG
)
2758 rtx new_rtx
= replace_rtx (SUBREG_REG (x
), from
, to
);
2760 if (CONST_INT_P (new_rtx
))
2762 x
= simplify_subreg (GET_MODE (x
), new_rtx
,
2763 GET_MODE (SUBREG_REG (x
)),
2768 SUBREG_REG (x
) = new_rtx
;
2772 else if (GET_CODE (x
) == ZERO_EXTEND
)
2774 rtx new_rtx
= replace_rtx (XEXP (x
, 0), from
, to
);
2776 if (CONST_INT_P (new_rtx
))
2778 x
= simplify_unary_operation (ZERO_EXTEND
, GET_MODE (x
),
2779 new_rtx
, GET_MODE (XEXP (x
, 0)));
2783 XEXP (x
, 0) = new_rtx
;
2788 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
2789 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
2792 XEXP (x
, i
) = replace_rtx (XEXP (x
, i
), from
, to
);
2793 else if (fmt
[i
] == 'E')
2794 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
2795 XVECEXP (x
, i
, j
) = replace_rtx (XVECEXP (x
, i
, j
), from
, to
);
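
/* For example (illustrative): to rewrite every use of OLD_REG in the
   body of INSN with NEW_REG, modifying the pattern in place:

     PATTERN (insn) = replace_rtx (PATTERN (insn), old_reg, new_reg);

   Because no copying is done, this is only safe when the pattern is not
   shared with any other insn.  */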
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
        {
          rtx ref = RTVEC_ELT (vec, i);
          if (XEXP (ref, 0) == old_label)
            {
              XEXP (ref, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
        {
          if (GET_CODE (x) == SYMBOL_REF
              && CONSTANT_POOL_ADDRESS_P (x))
            {
              rtx c = get_pool_constant (x);
              if (rtx_referenced_p (old_label, c))
                {
                  /* Create a copy of constant C; replace the label inside
                     but do not update LABEL_NUSES because uses in constant pool
                     are not counted.  */
                  rtx new_c = copy_rtx (c);
                  replace_label (&new_c, old_label, new_label, false);

                  /* Add the new constant NEW_C to constant pool and replace
                     the old reference to constant by new reference.  */
                  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
                  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
                }
            }

          if ((GET_CODE (x) == LABEL_REF
               || GET_CODE (x) == INSN_LIST)
              && XEXP (x, 0) == old_label)
            {
              XEXP (x, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
    }
}

void
replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
                       bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
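
/* A minimal sketch (illustrative; real passes normally go through
   redirect_jump so that the other jump bookkeeping stays consistent):

     replace_label_in_insn (jump, old_label, new_label, true);

   With UPDATE_LABEL_NUSES set, LABEL_NUSES moves from OLD_LABEL to
   NEW_LABEL along with each rewritten reference.  */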
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
        /* Check if a label_ref Y refers to label X.  */
        if (GET_CODE (y) == LABEL_REF
            && LABEL_P (x)
            && LABEL_REF_LABEL (y) == x)
          return true;

        if (rtx_equal_p (x, y))
          return true;

        /* If Y is a reference to pool constant traverse the constant.  */
        if (GET_CODE (y) == SYMBOL_REF
            && CONSTANT_POOL_ADDRESS_P (y))
          iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label;
  rtx_insn *table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }
  return false;
}
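
/* Typical use (illustrative): walking every target of a dispatch table.

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
         rtvec labels = table->get_labels ();
         for (int i = 0; i < GET_NUM_ELEM (labels); i++)
           mark_label (XEXP (RTVEC_ELT (labels, i), 0));
       }

   mark_label stands in for whatever the caller does per target.  */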
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}

/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              {
                has_use_labelref = 1;
                break;
              }

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        rtx r1 = XEXP (x, 0);
        rtx add = XEXP (x, 1);
        return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}

/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
                  for_each_inc_dec_fn fn,
                  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
          && MEM_P (mem)
          && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
        {
          int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
          if (res != 0)
            return res;
          iter.skip_subrtxes ();
        }
    }
  return 0;
}
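
/* A minimal callback sketch (illustrative): counting the autoinc
   operations in an insn.  A real FN, such as the ones in dse.c or
   cselib.c, would record the equivalent add instead.

     static int
     count_autoinc (rtx, rtx, rtx, rtx, rtx, void *data)
     {
       ++*(int *) data;
       return 0;    (a nonzero return would stop the traversal)
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &n);  */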
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_WIDE_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -6;
      if (code == CONST_WIDE_INT)
        return -6;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}
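
/* For instance (illustrative), canonicalizing a commutative pair before
   building a PLUS:

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);
     x = gen_rtx_PLUS (mode, op0, op1);

   so that e.g. a REG ends up first and a CONST_INT second, matching the
   precedence values encoded above.  */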
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}

/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
bool
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return false;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return true;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return true;
    }
  return false;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
              machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}
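
/* Worked example (illustrative): on a little-endian target with 4-byte
   words, (subreg:SI (reg:DI R) 4) names the high word, and
   subreg_lsb_1 (SImode, DImode, 4) computes word 1, byte 0, giving bit
   offset 32.  On a big-endian target the same byte offset names the
   low-order word and the result is 0.  */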
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
   register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
                 unsigned int offset, machine_mode ymode,
                 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
        xmode_unit = xmode;
      else
        xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (GET_MODE_NUNITS (xmode)
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
                  == (hard_regno_nregs[xregno][xmode_unit]
                      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
           < GET_MODE_NUNITS (xmode))
          && (offset / GET_MODE_SIZE (xmode_unit)
              != ((offset + GET_MODE_SIZE (ymode) - 1)
                  / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
          ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        info->offset = nregs_xmode - nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
                                       mode_for_size (GET_MODE_BITSIZE (xmode)
                                                      / nregs_xmode,
                                                      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
        = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
                     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
                               unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
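
/* For example (illustrative, assuming word-sized hard registers and
   4-byte words): for (subreg:SI (reg:DI 10) 4) on a little-endian
   target, subreg_regno_offset (10, DImode, 4, SImode) is 1, i.e. the
   subreg lives in hard register 11, and the offset is representable.  */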
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
                       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
         necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}

/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller needs to either ensure that we will find all sets
         (in case code has not been optimized yet), or take care of
         possible labels by setting BOUNDARY to the preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const rtx_insn *i2
            = next_nonnote_insn (const_cast<rtx_insn *> (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
         number of units (translated from digits) when using
         schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
         the mode for the factor.  */
      factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      /* Pass through.  */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}

/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}
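
/* Usage sketch (illustrative): comparing a candidate replacement
   against the original under both metrics at once:

     struct full_rtx_costs old_cost, new_cost;
     get_full_rtx_cost (old_rtx, SET, 1, &old_cost);
     get_full_rtx_cost (new_rtx, SET, 1, &new_cost);
     if (costs_lt_p (&new_cost, &old_cost, optimize_insn_for_speed_p ()))
       ... prefer NEW_RTX ...

   costs_lt_p (rtl.h) orders by the metric chosen by its last argument
   and breaks ties with the other one.  */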
/* Return cost of address expression X.
   Expect that X is properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}


unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
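
/* For instance (illustrative): if X is (and:SI (reg:SI R) (const_int 255)),
   nonzero_bits (x, SImode) is 0xff, so a following zero-extension of X
   can be proven redundant, and num_sign_bit_copies (x, SImode) is 24
   (32 bits minus the 8 that may vary).  */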
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
                     machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}

/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
               machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x))
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
          && mode_width < BITS_PER_WORD
          && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
             != 0)
        return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);
        int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = floor_log2 (nz0 & -nz0);
        int low1 = floor_log2 (nz1 & -nz1);
        unsigned HOST_WIDE_INT op0_maybe_minusp
          = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned HOST_WIDE_INT op1_maybe_minusp
          = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
          && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry
             about the way loads are extended.  */
          if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
               ? val_signbit_known_set_p (inner_mode, nonzero)
               : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
              || !MEM_P (SUBREG_REG (x)))
#endif
            {
              /* On many CISC machines, accessing an object in a wider mode
                 causes the high-order bits to become undefined.  So they are
                 not known to be zero.  */
              if (GET_MODE_PRECISION (GET_MODE (x))
                  > GET_MODE_PRECISION (inner_mode))
                nonzero |= (GET_MODE_MASK (GET_MODE (x))
                            & ~GET_MODE_MASK (inner_mode));
            }
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        {
          machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_PRECISION (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero
            = cached_nonzero_bits (XEXP (x, 0), mode,
                                   known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
                inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
                         << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
                            machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
4501 /* Return the number of bits at the high-order end of X that are known to
4502 be equal to the sign bit. X will be used in mode MODE; if MODE is
4503 VOIDmode, X will be used in its own mode. The returned value will always
4504 be between 1 and the number of bits in MODE. */
4507 num_sign_bit_copies1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4508 machine_mode known_mode
,
4509 unsigned int known_ret
)
4511 enum rtx_code code
= GET_CODE (x
);
4512 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4513 int num0
, num1
, result
;
4514 unsigned HOST_WIDE_INT nonzero
;
4516 /* If we weren't given a mode, use the mode of X. If the mode is still
4517 VOIDmode, we don't know anything. Likewise if one of the modes is
4520 if (mode
== VOIDmode
)
4521 mode
= GET_MODE (x
);
4523 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4524 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4527 /* For a smaller object, just ignore the high bits. */
4528 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4530 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4531 known_x
, known_mode
, known_ret
);
4533 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4536 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4538 #ifndef WORD_REGISTER_OPERATIONS
4539 /* If this machine does not do all register operations on the entire
4540 register and MODE is wider than the mode of X, we can say nothing
4541 at all about the high-order bits. */
4544 /* Likewise on machines that do, if the mode of the object is smaller
4545 than a word and loads of that size don't sign extend, we can say
4546 nothing about the high order bits. */
4547 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4548 #ifdef LOAD_EXTEND_OP
4549 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4560 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4561 /* If pointers extend signed and this is a pointer in Pmode, say that
4562 all the bits above ptr_mode are known to be sign bit copies. */
4563 /* As we do not know which address space the pointer is referring to,
4564 we can do this only if the target does not support different pointer
4565 or address modes depending on the address space. */
4566 if (target_default_pointer_address_modes_p ()
4567 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4568 && mode
== Pmode
&& REG_POINTER (x
))
4569 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4573 unsigned int copies_for_hook
= 1, copies
= 1;
4574 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4575 known_mode
, known_ret
,
4579 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4580 known_mode
, known_ret
);
4582 if (copies
> 1 || copies_for_hook
> 1)
4583 return MAX (copies
, copies_for_hook
);
4585 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4590 #ifdef LOAD_EXTEND_OP
4591 /* Some RISC machines sign-extend all loads of smaller than a word. */
4592 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4593 return MAX (1, ((int) bitwidth
4594 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1));
4599 /* If the constant is negative, take its 1's complement and remask.
4600 Then see how many zero bits we have. */
4601 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4602 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4603 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4604 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4606 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4609 /* If this is a SUBREG for a promoted object that is sign-extended
4610 and we are looking at it in a wider mode, we know that at least the
4611 high-order bits are known to be sign bit copies. */
4613 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_SIGNED_P (x
))
4615 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4616 known_x
, known_mode
, known_ret
);
4617 return MAX ((int) bitwidth
4618 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1,
4622 /* For a smaller object, just ignore the high bits. */
4623 if (bitwidth
<= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
))))
4625 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), VOIDmode
,
4626 known_x
, known_mode
, known_ret
);
4627 return MAX (1, (num0
4628 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
)))
4632 #ifdef WORD_REGISTER_OPERATIONS
4633 #ifdef LOAD_EXTEND_OP
4634 /* For paradoxical SUBREGs on machines where all register operations
4635 affect the entire register, just look inside. Note that we are
4636 passing MODE to the recursive call, so the number of sign bit copies
4637 will remain relative to that mode, not the inner mode. */
4639 /* This works only if loads sign extend. Otherwise, if we get a
4640 reload for the inner part, it may be loaded from the stack, and
4641 then we lose all sign bit copies that existed before the store
4644 if (paradoxical_subreg_p (x
)
4645 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
4646 && MEM_P (SUBREG_REG (x
)))
4647 return cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4648 known_x
, known_mode
, known_ret
);
4654 if (CONST_INT_P (XEXP (x
, 1)))
4655 return MAX (1, (int) bitwidth
- INTVAL (XEXP (x
, 1)));
4659 return (bitwidth
- GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4660 + cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4661 known_x
, known_mode
, known_ret
));
4664 /* For a smaller object, just ignore the high bits. */
4665 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4666 known_x
, known_mode
, known_ret
);
4667 return MAX (1, (num0
- (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4671 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4672 known_x
, known_mode
, known_ret
);
4674 case ROTATE
: case ROTATERT
:
4675 /* If we are rotating left by a number of bits less than the number
4676 of sign bit copies, we can just subtract that amount from the
4678 if (CONST_INT_P (XEXP (x
, 1))
4679 && INTVAL (XEXP (x
, 1)) >= 0
4680 && INTVAL (XEXP (x
, 1)) < (int) bitwidth
)
4682 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4683 known_x
, known_mode
, known_ret
);
4684 return MAX (1, num0
- (code
== ROTATE
? INTVAL (XEXP (x
, 1))
4685 : (int) bitwidth
- INTVAL (XEXP (x
, 1))));
4690 /* In general, this subtracts one sign bit copy. But if the value
4691 is known to be positive, the number of sign bit copies is the
4692 same as that of the input. Finally, if the input has just one bit
4693 that might be nonzero, all the bits are copies of the sign bit. */
4694 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4695 known_x
, known_mode
, known_ret
);
4696 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4697 return num0
> 1 ? num0
- 1 : 1;
4699 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4704 && (((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
))
4709 case IOR
: case AND
: case XOR
:
4710 case SMIN
: case SMAX
: case UMIN
: case UMAX
:
4711 /* Logical operations will preserve the number of sign-bit copies.
4712 MIN and MAX operations always return one of the operands. */
4713 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4714 known_x
, known_mode
, known_ret
);
4715 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4716 known_x
, known_mode
, known_ret
);
4718 /* If num1 is clearing some of the top bits then regardless of
4719 the other term, we are guaranteed to have at least that many
4720 high-order zero bits. */
4723 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4724 && CONST_INT_P (XEXP (x
, 1))
4725 && (UINTVAL (XEXP (x
, 1))
4726 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) == 0)
4729 /* Similarly for IOR when setting high-order bits. */
4732 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4733 && CONST_INT_P (XEXP (x
, 1))
4734 && (UINTVAL (XEXP (x
, 1))
4735 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4738 return MIN (num0
, num1
);
4740 case PLUS
: case MINUS
:
4741 /* For addition and subtraction, we can have a 1-bit carry. However,
4742 if we are subtracting 1 from a positive number, there will not
4743 be such a carry. Furthermore, if the positive number is known to
4744 be 0 or 1, we know the result is either -1 or 0. */
4746 if (code
== PLUS
&& XEXP (x
, 1) == constm1_rtx
4747 && bitwidth
<= HOST_BITS_PER_WIDE_INT
)
4749 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4750 if ((((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
) == 0)
4751 return (nonzero
== 1 || nonzero
== 0 ? bitwidth
4752 : bitwidth
- floor_log2 (nonzero
) - 1);
4755 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4756 known_x
, known_mode
, known_ret
);
4757 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4758 known_x
, known_mode
, known_ret
);
4759 result
= MAX (1, MIN (num0
, num1
) - 1);
4764 /* The number of bits of the product is the sum of the number of
4765 bits of both terms. However, unless one of the terms if known
4766 to be positive, we must allow for an additional bit since negating
4767 a negative number can remove one sign bit copy. */
4769 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4770 known_x
, known_mode
, known_ret
);
4771 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4772 known_x
, known_mode
, known_ret
);
4774 result
= bitwidth
- (bitwidth
- num0
) - (bitwidth
- num1
);
4776 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4777 || (((nonzero_bits (XEXP (x
, 0), mode
)
4778 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4779 && ((nonzero_bits (XEXP (x
, 1), mode
)
4780 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)))
4784 return MAX (1, result
);
4787 /* The result must be <= the first operand. If the first operand
4788 has the high bit set, we know nothing about the number of sign
4790 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4792 else if ((nonzero_bits (XEXP (x
, 0), mode
)
4793 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4796 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4797 known_x
, known_mode
, known_ret
);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
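
      /* Worked example (illustrative): on a target with
	 STORE_FLAG_VALUE == 1 a comparison result is 0 or 1, so in
	 SImode this returns 32 - floor_log2 (1) - 1 == 31 copies.  */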

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}

/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
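
/* Usage sketch (illustrative; the COSTS_N_INSNS (2) cutoff is a made-up
   budget, not taken from any particular pass):

     int cost = insn_rtx_cost (PATTERN (insn),
			       optimize_bb_for_speed_p (bb));
     if (cost == 0 || cost > COSTS_N_INSNS (2))
       give up;

   remembering that a zero return value means the cost is unknown.  */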

/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
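
/* Usage sketch (illustrative; LIMIT is a hypothetical budget):

     start_sequence ();
     ... emit candidate insns ...
     rtx_insn *seq = get_insns ();
     end_sequence ();
     if (seq_cost (seq, true) < LIMIT)
       emit_insn (seq);  */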

/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != ((unsigned HOST_WIDE_INT) 1
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}

/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
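
/* Usage sketch (illustrative): a pass that needs the branch condition of
   JUMP in canonical form might do

     rtx_insn *earliest;
     rtx cond = get_condition (jump, &earliest, 0, 1);

   If COND is nonnull, GET_CODE (cond) is a comparison code and any
   constant operand has been moved to XEXP (cond, 1).  */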

/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
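
/* For example (illustrative), on a target whose mode_rep_extended hook
   returns SIGN_EXTEND for (SImode, DImode), the loops above record
   num_sign_bit_copies_in_rep[DImode][SImode] == 32: every DImode bit
   outside SImode must be a copy of the SImode sign bit.  */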

/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;
  else
    return false;
}

/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}

/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}

/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}

/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
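
/* For example (illustrative), low_bitmask_len (SImode, 0x3f) == 6 since
   0x3f + 1 is a power of two, whereas 0x5f gives -1 because 0x60 is not:
   the mask has a hole.  */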

/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}

/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
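
/* Worked example (illustrative): on a 32-bit little-endian target with a
   64-bit HOST_WIDE_INT, splitting (const_int 0x100000002) stores
   (const_int 2) in *FIRST and (const_int 1) in *SECOND.  */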

/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}

/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside and is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}

/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}

/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}

/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}

/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}

/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}

/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
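
/* Worked example (illustrative): for the address
   (plus (mult R1 (const_int 4)) (plus R2 (const_int 12))) the loop above
   classifies (const_int 12) as the displacement and (mult R1 ...) as the
   index (a MULT cannot be a base), leaving R2 to become the base.  */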

/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
5951 decompose_lea_address (struct address_info
*info
, rtx
*loc
)
5953 decompose_address (info
, loc
, VOIDmode
, ADDR_SPACE_GENERIC
, ADDRESS
);

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
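
/* For example (illustrative), (mult R (const_int 4)) yields 4,
   (ashift R (const_int 2)) yields 1 << 2 == 4, and a bare register
   index yields 1.  */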

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}

/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}