/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "rtl.h"
#include "insn-config.h"
#include "basic-block.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode,
                                                const_rtx, machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);
rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
						  value_type *base,
						  size_t i, value_type x)
{
  if (base == array.stack)
    {
      /* Add to the stack while there is still room.  */
      if (i < LOCAL_ELEMS)
	{
	  base[i] = x;
	  return base;
	}

      /* Otherwise move to the heap, copying the existing stack
	 elements across first.  */
      gcc_checking_assert (i == LOCAL_ELEMS);
      vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }

  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
						    value_type *base,
						    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
	 we're likely to want most.  It also allows for the SEQUENCE
	 code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
	if (format[i] == 'e')
	  {
	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
	    if (__builtin_expect (end < LOCAL_ELEMS, true))
	      base[end++] = subx;
	    else
	      base = add_single_to_queue (array, base, end++, subx);
	  }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
	{
	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
	  if (__builtin_expect (end < LOCAL_ELEMS, true))
	    base[end++] = subx;
	  else
	    base = add_single_to_queue (array, base, end++, subx);
	}
      else if (format[i] == 'E')
	{
	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
	    for (unsigned int j = 0; j < length; j++)
	      base[end++] = T::get_value (vec[j]);
	  else
	    for (unsigned int j = 0; j < length; j++)
	      base = add_single_to_queue (array, base, end++,
					  T::get_value (vec[j]));
	  if (code == SEQUENCE && end == length)
	    /* If the subrtxes of the sequence fill the entire array then
	       we know that no other parts of a containing insn are queued.
	       The caller is therefore iterating over the sequence as a
	       PATTERN (...), so we also want the patterns of the
	       subinstructions.  */
	    for (unsigned int j = 0; j < length; j++)
	      {
		typename T::rtx_type x = T::get_rtx (base[j]);
		if (INSN_P (x))
		  base[j] = T::get_value (PATTERN (x));
	      }
	}
  return end - orig_end;
}
template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
	 that must happen after a call.  This currently screws up local-alloc
	 into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
	return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_unstable_p (XEXP (x, i)))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_unstable_p (XVECEXP (x, i, j)))
	    return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
	 and arg pointers and not just the register number in case we have
	 eliminated the frame and/or arg pointer and are using it
	 for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      if (x == pic_offset_table_rtx
	  /* ??? When call-clobbered, the value is stable modulo the restore
	     that must happen after a call.  This currently screws up
	     local-alloc into believing that the restore is not needed, so we
	     must return 0 only if we are called from alias analysis.  */
	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
	return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
	 (in fact it is related specifically to operand 1)
	 during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
	     || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }

  return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
		       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
	 the real alignment of %sp.  However, when it does this, the
	 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
	actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
	return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
	return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
	{
	  tree decl;
	  HOST_WIDE_INT decl_size;

	  if (offset < 0)
	    return 1;
	  if (size == 0)
	    size = GET_MODE_SIZE (mode);
	  if (size == 0)
	    return offset != 0;

	  /* If the size of the access or of the symbol is unknown,
	     assume the worst.  */
	  decl = SYMBOL_REF_DECL (x);

	  /* Else check that the access is in bounds.  TODO: restructure
	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
	  if (!decl)
	    decl_size = -1;
	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
	    decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
			 ? tree_to_shwi (DECL_SIZE_UNIT (decl))
			 : -1);
	  else if (TREE_CODE (decl) == STRING_CST)
	    decl_size = TREE_STRING_LENGTH (decl);
	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
	  else
	    decl_size = -1;

	  return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
	}

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
	 nonsensical offsets.  */
      if (x == frame_pointer_rtx)
	{
	  HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
	  if (size == 0)
	    size = GET_MODE_SIZE (mode);
	  if (FRAME_GROWS_DOWNWARD)
	    {
	      if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
		return 1;
	    }
	  else
	    {
	      if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
		return 1;
	    }
	  return 0;
	}
      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
	 - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
	return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
				     size, mode, unaligned_mems))
	return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
				    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the case above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
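
/* Illustrative usage (not part of the original file): refusing to hoist
   a load whose address might fault, using placeholder variable names:

     if (MEM_P (x) && rtx_addr_can_trap_p (XEXP (x, 0)))
       return false;

   may_trap_p later in this file is the usual higher-level entry point
   for whole rtxes.  */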
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
	  && CONSTANT_P (XEXP (x, 1)))
	return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
	 auto-inc is only allowed in memories, the register must be a
	 pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0)
	return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the case above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
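
/* Illustrative example (not part of the original file): for
   x = (const (plus (symbol_ref "foo") (const_int 8))) this returns 8,
   for (const (minus (symbol_ref "foo") (const_int 8))) it returns -8,
   and for a bare (symbol_ref "foo") it returns 0.  */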
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
	   && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
	return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
	return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
	{
	  *base_out = XEXP (x, 0);
	  *offset_out = XEXP (x, 1);
	  return;
	}
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
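
/* Illustrative usage (not part of the original file):

     rtx base, offset;
     split_const (x, &base, &offset);

   For x = (const (plus (symbol_ref "foo") (const_int 12))) this yields
   base = (symbol_ref "foo") and offset = (const_int 12); for anything
   without an apparent offset, base = x and offset = const0_rtx.  */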
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
	count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
	return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
	return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
	{
	case 'e':
	  count += count_occurrences (XEXP (x, i), find, count_dest);
	  break;

	case 'E':
	  for (j = 0; j < XVECLEN (x, i); j++)
	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
	  break;
	}
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
	 and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && reg_mentioned_p (reg, XEXP (in, i)))
	return 1;
    }

  return 0;
}
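
/* Illustrative usage (not part of the original file), with placeholder
   names:

     if (reg_mentioned_p (target_reg, PATTERN (insn)))
       record_dependence (insn);

   record_dependence is a placeholder, not a real GCC function.  */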
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
		    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
	    || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
	return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn references X if
	 it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
	  && GET_CODE (SET_DEST (body)) != PC
	  && !REG_P (SET_DEST (body))
	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (body)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
	return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
	  return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
	  return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
	return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
		   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
	  || (CALL_P (insn)
	      && ((REG_P (reg)
		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
		   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
					       GET_MODE (reg), REGNO (reg)))
		  || MEM_P (reg)
		  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
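
/* Illustrative usage (not part of the original file): a typical safety
   check before code motion, using placeholder variable names:

     if (reg_set_p (reg, other_insn)
	 || reg_set_between_p (reg, insn, limit))
       return false;
*/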
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
	if (memory_modified_in_insn_p (x, insn))
	  return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_between_p (XVECEXP (x, i, j), start, end))
	    return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      if (memory_modified_in_insn_p (x, insn))
	return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_in_p (XVECEXP (x, i, j), insn))
	    return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
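
/* Illustrative usage (not part of the original file):

     const_rtx modifier = set_of (reg, insn);
     if (modifier && GET_CODE (modifier) == CLOBBER)
       ...

   i.e. set_of reports not just whether REG is modified by INSN but
   also which SET or CLOBBER does it.  */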
/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
	IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx sub = XVECEXP (pat, 0, i);
	  switch (GET_CODE (sub))
	    {
	    case USE:
	    case CLOBBER:
	      break;

	    case SET:
	      /* We can consider insns having multiple sets, where all
		 but one are dead as single set insns.  In common case
		 only single set is present in the pattern so we want
		 to avoid checking for REG_UNUSED notes unless necessary.

		 When we reach set first time, we just expect this is
		 the single set we are looking for and only when more
		 sets are found in the insn, we check them.  */
	      if (!set_verified)
		{
		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
		      && !side_effects_p (set))
		    set = NULL;
		  else
		    set_verified = 1;
		}
	      if (!set)
		set = sub, set_verified = 0;
	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
		       || side_effects_p (sub))
		return NULL_RTX;
	      break;

	    default:
	      return NULL_RTX;
	    }
	}
    }
  return set;
}
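
/* Illustrative usage (not part of the original file): most passes go
   through the single_set wrapper declared in rtl.h, which fast-paths a
   plain SET pattern and falls back to single_set_2 for PARALLELs:

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       ...
*/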
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  {
	    /* If we have already found a SET, then return now.  */
	    if (found)
	      return 1;
	    else
	      found = 1;
	  }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
	   && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
	   && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
	return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
	if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
	  return 0;
      return
	simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
			       offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
	  && REGNO (src) == REGNO (dst));
}
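
/* Illustrative example (not part of the original file):
   (set (reg:SI 100) (reg:SI 100)) is a no-op by the final test above,
   while a MEM-to-MEM copy of the same location is one only if the MEM
   has no side effects (e.g. is not volatile).  */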
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
	 this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx tem = XVECEXP (pat, 0, i);

	  if (GET_CODE (tem) == USE
	      || GET_CODE (tem) == CLOBBER)
	    continue;

	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
	    return 0;
	}

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
		   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
	 clobber a virtual register.  In fact, we could be more precise,
	 but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	   || x_regno == ARG_POINTER_REGNUM
#endif
	   || x_regno == FRAME_POINTER_REGNUM)
	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
	return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
	 registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int inner_regno = subreg_regno (x);
	  unsigned int inner_endregno
	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
			     ? subreg_nregs (x) : 1);

	  return endregno > inner_regno && regno < inner_endregno;
	}
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
	  /* Note setting a SUBREG counts as referring to the REG it is in for
	     a pseudo but not for hard registers since we can
	     treat each word individually.  */
	  && ((GET_CODE (SET_DEST (x)) == SUBREG
	       && loc != &SUBREG_REG (SET_DEST (x))
	       && REG_P (SUBREG_REG (SET_DEST (x)))
	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
	       && refers_to_regno_p (regno, endregno,
				     SUBREG_REG (SET_DEST (x)), loc))
	      || (!REG_P (SET_DEST (x))
		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
	return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
	return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
	{
	  if (i == 0)
	    {
	      x = XEXP (x, 0);
	      goto repeat;
	    }
	  else
	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
	      return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (loc != &XVECEXP (x, i, j)
		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
	      return 1;
	}
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
	regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
			  ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
	const char *fmt;
	int i;

	if (MEM_P (in))
	  return 1;

	fmt = GET_RTX_FORMAT (GET_CODE (in));
	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    {
	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
		return 1;
	    }
	  else if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
		  return 1;
	    }

	return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
	int i;

	/* If any register in here refers to it we return true.  */
	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
	    return 1;
	return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
	      && (!REG_P (SUBREG_REG (dest))
		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
	     || GET_CODE (dest) == ZERO_EXTRACT
	     || GET_CODE (dest) == STRICT_LOW_PART)
	dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
	 each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
	{
	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
	}
      else
	(*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
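
/* Illustrative usage (not part of the original file): the callback
   style used by record_hard_reg_sets earlier in this file, here with
   placeholder names:

     static void
     count_reg_stores (rtx dest, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
	 ++*(int *) data;
     }

     int n = 0;
     note_stores (PATTERN (insn), count_reg_stores, &n);
*/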
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	(*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	(*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
	rtx dest = SET_DEST (body);

	/* For sets we replace everything in source plus registers in memory
	   expression in store and operands of a ZERO_EXTRACT.  */
	(*fun) (&SET_SRC (body), data);

	if (GET_CODE (dest) == ZERO_EXTRACT)
	  {
	    (*fun) (&XEXP (dest, 1), data);
	    (*fun) (&XEXP (dest, 2), data);
	  }

	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
	  dest = XEXP (dest, 0);

	if (MEM_P (dest))
	  (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
	    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
	       + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
	 values of functions, and those registers are wrapped in
	 PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	{
	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
	  if (inner != NULL_RTX
	      && covers_regno_no_parallel_p (inner, test_regno))
	    return true;
	}

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
	{
	  rtx body = XVECEXP (pattern, 0, i);

	  if (GET_CODE (body) == COND_EXEC)
	    body = COND_EXEC_CODE (body);

	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
	      && covers_regno_p (SET_DEST (body), test_regno))
	    return 1;
	}
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
	if (REG_NOTE_KIND (link) == kind)
	  return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
	/* Verify that it is a register, so that scratch and MEM won't cause a
	   problem here.  */
	&& REG_P (XEXP (link, 0))
	&& REGNO (XEXP (link, 0)) <= regno
	&& END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
	|| REG_NOTE_KIND (link) == REG_EQUIV)
      {
	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
	   insns that have multiple sets.  Checking single_set to
	   make sure of this is not the proper check, as explained
	   in the comment in set_unique_reg_note.

	   This should be changed into an assert.  */
	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
	  return 0;
	return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
	return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link;
	   link = XEXP (link, 1))
	if (GET_CODE (XEXP (link, 0)) == code
	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
	  return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
	 to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  unsigned int end_regno = END_HARD_REGNO (datum);
	  unsigned int i;

	  for (i = regno; i < end_regno; i++)
	    if (find_regno_fusage (insn, code, i))
	      return 1;
	}
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
	  && REG_P (reg = XEXP (op, 0))
	  && REGNO (reg) <= regno
	  && END_HARD_REGNO (reg) > regno)
	return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
	 EXPR_LIST, so that copying is done right and dumps look
	 better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
				       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
	{
	  XEXP (link, 1) = XEXP (note, 1);
	  break;
	}

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
	*loc = XEXP (*loc, 1);
      else
	loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
	 or REG_EQUIV note by hacking the list manually rather than calling
	 remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == temp->element ())
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = temp->next ();
	  else
	    *listp = temp->next ();

	  return;
	}

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
	{
	  /* Splice the node out of the list.  */
	  if (prev)
	    XEXP (prev, 1) = temp->next ();
	  else
	    *listp = temp->next ();

	  return;
	}

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_insn_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_insn_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (volatile_refs_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (volatile_refs_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
	 when some combination can't be done.  If we see one, don't think
	 that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    if (side_effects_p (XEXP (x, i)))
	      return 1;
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      if (side_effects_p (XVECEXP (x, i, j)))
		return 1;
	  }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

static int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
	  && MEM_VOLATILE_P (x)
	  && XEXP (x, 0) == stack_pointer_rtx)
	return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
	     reference; moving it out of context such as when moving code
	     when optimizing, might cause its address to become invalid.  */
	  code_changed
	  || !MEM_NOTRAP_P (x))
	{
	  HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
					GET_MODE (x), code_changed);
	}
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (x))
	return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
	return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
	return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
	 certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
	break;
      /* ??? There is no machine independent way to check for tests that trap
	 when COMPARE is used, though many targets do make this distinction.
	 For instance, sparc uses CCFPE for compares which generate exceptions
	 and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (x))
	return 1;
      /* But often the compare has some CC mode, so check operand
	 modes as well.  */
      if (HONOR_NANS (XEXP (x, 0))
	  || HONOR_NANS (XEXP (x, 1)))
	return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (x))
	return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
	  || HONOR_SNANS (XEXP (x, 1)))
	return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
	return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
	return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (may_trap_p_1 (XEXP (x, i), flags))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
	      return 1;
	}
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
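
/* Illustrative usage (not part of the original file): passes test this
   before speculating an expression, e.g.

     if (!side_effects_p (x) && !may_trap_p (x))
       move_computation_earlier (x);

   move_computation_earlier is a placeholder, not a real GCC function.  */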
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
	if (s->d == 1)
	  return *s->ip;
	else
	  return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      if ((reg:SI) == 1)
	(set (reg:SI) (mem:SI (%fp - 7)))
      else
	(set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
	{
	  if (inequality_comparisons_p (XEXP (x, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
	      return 1;
	}
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
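/* Illustrative sketch, not part of the original file: substituting one
   pseudo for another inside a pattern.  Since replace_rtx modifies X in
   place, a caller that cannot rule out sharing copies the pattern first;
   the helper name is hypothetical.  */

static rtx
substitute_reg_copy (rtx pat, rtx old_reg, rtx new_reg)
{
  return replace_rtx (copy_rtx (pat), old_reg, new_reg);
}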
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label,
	       bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
	{
	  rtx ref = RTVEC_ELT (vec, i);
	  if (XEXP (ref, 0) == old_label)
	    {
	      XEXP (ref, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
	{
	  if (GET_CODE (x) == SYMBOL_REF
	      && CONSTANT_POOL_ADDRESS_P (x))
	    {
	      rtx c = get_pool_constant (x);
	      if (rtx_referenced_p (old_label, c))
		{
		  /* Create a copy of constant C; replace the label inside
		     but do not update LABEL_NUSES because uses in constant pool
		     are not counted.  */
		  rtx new_c = copy_rtx (c);
		  replace_label (&new_c, old_label, new_label, false);

		  /* Add the new constant NEW_C to constant pool and replace
		     the old reference to constant by new reference.  */
		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
		}
	    }

	  if ((GET_CODE (x) == LABEL_REF
	       || GET_CODE (x) == INSN_LIST)
	      && XEXP (x, 0) == old_label)
	    {
	      XEXP (x, 0) = new_label;
	      if (update_label_nuses)
		{
		  ++LABEL_NUSES (new_label);
		  --LABEL_NUSES (old_label);
		}
	    }
	}
    }
}

void
replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
		       bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && LABEL_REF_LABEL (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label;
  rtx_insn *table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
	*labelp = label;
      if (tablep)
	*tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }
  return false;
}
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
	      || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
	  && computed_jump_p_1 (XEXP (x, i)))
	return 1;

      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
	    return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
	return 0;

      if (GET_CODE (pat) == PARALLEL)
	{
	  int len = XVECLEN (pat, 0);
	  int has_use_labelref = 0;

	  for (i = len - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
		    == LABEL_REF))
	      {
		has_use_labelref = 1;
		break;
	      }

	  if (! has_use_labelref)
	    for (i = len - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
		return 1;
	}
      else if (GET_CODE (pat) == SET
	       && SET_DEST (pat) == pc_rtx
	       && computed_jump_p_1 (SET_SRC (pat)))
	return 1;
    }
  return 0;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}
/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
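/* Illustrative sketch, not part of the original file: a minimal
   for_each_inc_dec callback that merely counts autoinc operations,
   following the callback contract documented above.  The helper name
   is hypothetical.  */

static int
count_inc_dec_cb (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		  rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		  rtx srcoff ATTRIBUTE_UNUSED, void *arg)
{
  ++*(int *) arg;
  return 0;	/* Zero continues the traversal.  */
}

/* Usage: int n = 0; for_each_inc_dec (PATTERN (insn), count_inc_dec_cb, &n);  */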
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_WIDE_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -6;
      if (code == CONST_WIDE_INT)
	return -6;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
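/* Illustrative sketch, not part of the original file: how a caller
   typically uses swap_commutative_operands_p to put a commutative
   operation into canonical form before matching or simplifying it.
   The helper name is hypothetical.  */

static void
canonicalize_commutative_operands (rtx x)
{
  if (COMMUTATIVE_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      rtx tem = XEXP (x, 0);
      XEXP (x, 0) = XEXP (x, 1);
      XEXP (x, 1) = tem;
    }
}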
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
	      machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
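/* Illustrative sketch, not part of the original file: on a target where
   neither BYTES_BIG_ENDIAN nor WORDS_BIG_ENDIAN is set, byte offset 4
   into a DImode value selects bits 32..63, so the subreg starts at bit 32,
   while offset 0 selects the low word.  The helper name is hypothetical.  */

static void
subreg_lsb_example (void)
{
  if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN)
    {
      gcc_checking_assert (subreg_lsb_1 (SImode, DImode, 0) == 0);
      gcc_checking_assert (subreg_lsb_1 (SImode, DImode, 4) == 32);
    }
}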
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
   register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, machine_mode xmode,
		 unsigned int offset, machine_mode ymode,
		 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
	xmode_unit = xmode;
      else
	xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (GET_MODE_NUNITS (xmode)
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
		  == (hard_regno_nregs[xregno][xmode_unit]
		      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
	   < GET_MODE_NUNITS (xmode))
	  && (offset / GET_MODE_SIZE (xmode_unit)
	      != ((offset + GET_MODE_SIZE (ymode) - 1)
		  / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
	  ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
	info->offset = nregs_xmode - nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
				       mode_for_size (GET_MODE_BITSIZE (xmode)
						      / nregs_xmode,
						      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
	= (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
		       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
	 necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
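/* Illustrative sketch, not part of the original file: asking whether the
   lowpart SImode piece of a DImode hard register can be expressed as a
   plain (reg:SI ...).  The helper name is hypothetical; it returns the
   resulting register number or -1.  */

static int
lowpart_si_regno_of_di (unsigned int hard_regno)
{
  return simplify_subreg_regno (hard_regno, DImode,
				subreg_lowpart_offset (SImode, DImode),
				SImode);
}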
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller needs to either ensure that we will find all sets
	 (in case code has not been optimized yet), or take care of
	 possible labels by setting BOUNDARY to the preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const rtx_insn *i2
	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
	 number of units (translated from digits) when using
	 schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
	 the mode for the factor.  */
      factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
      if (factor == 0)
	factor = 1;
      /* Fall through.  */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
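/* Illustrative sketch, not part of the original file: nonzero_bits returns
   a conservative superset of the bits that may be set, so proving that a
   value fits in the low eight bits is a simple mask test.  The helper name
   is hypothetical.  */

static bool
fits_in_low_byte_p (rtx x, machine_mode mode)
{
  return (nonzero_bits (x, mode) & ~(unsigned HOST_WIDE_INT) 0xff) == 0;
}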
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
		     machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}

/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
	       machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && REG_POINTER (x))
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
			     alignment);
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
						  known_mode, known_ret,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	     != 0)
	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show all the bits in the outer mode but not the inner
	 may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = floor_log2 (nz0 & -nz0);
	int low1 = floor_log2 (nz1 & -nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
	nonzero = GET_MODE_MASK (GET_MODE (x))
		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
					 known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
	  && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
	  /* If this is a typical RISC machine, we only have to worry
	     about the way loads are extended.  */
	  if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
	       ? val_signbit_known_set_p (inner_mode, nonzero)
	       : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
	      || !MEM_P (SUBREG_REG (x)))
#endif
	    {
	      /* On many CISC machines, accessing an object in a wider mode
		 causes the high-order bits to become undefined.  So they are
		 not known to be zero.  */
	      if (GET_MODE_PRECISION (GET_MODE (x))
		  > GET_MODE_PRECISION (inner_mode))
		nonzero |= (GET_MODE_MASK (GET_MODE (x))
			    & ~GET_MODE_MASK (inner_mode));
	    }
	}
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in GET_MODE (x) are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	{
	  machine_mode inner_mode = GET_MODE (x);
	  unsigned int width = GET_MODE_PRECISION (inner_mode);
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  if (code == LSHIFTRT)
	    inner >>= count;
	  else if (code == ASHIFTRT)
	    {
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
		inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
			 << (width - count);
	    }
	  else if (code == ASHIFT)
	    inner <<= count;
	  else
	    inner = ((inner << (count % width)
		      | (inner >> (width - (count % width)))) & mode_mask);

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}

/* See the macro definition above.  */
#undef cached_num_sign_bit_copies
/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, machine_mode mode, const_rtx known_x,
			    machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (ARITHMETIC_P (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
4490 /* Return the number of bits at the high-order end of X that are known to
4491 be equal to the sign bit. X will be used in mode MODE; if MODE is
4492 VOIDmode, X will be used in its own mode. The returned value will always
4493 be between 1 and the number of bits in MODE. */
4496 num_sign_bit_copies1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4497 machine_mode known_mode
,
4498 unsigned int known_ret
)
4500 enum rtx_code code
= GET_CODE (x
);
4501 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4502 int num0
, num1
, result
;
4503 unsigned HOST_WIDE_INT nonzero
;
4505 /* If we weren't given a mode, use the mode of X. If the mode is still
4506 VOIDmode, we don't know anything. Likewise if one of the modes is
4509 if (mode
== VOIDmode
)
4510 mode
= GET_MODE (x
);
4512 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4513 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4516 /* For a smaller object, just ignore the high bits. */
4517 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4519 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4520 known_x
, known_mode
, known_ret
);
4522 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4525 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4527 #ifndef WORD_REGISTER_OPERATIONS
4528 /* If this machine does not do all register operations on the entire
4529 register and MODE is wider than the mode of X, we can say nothing
4530 at all about the high-order bits. */
4533 /* Likewise on machines that do, if the mode of the object is smaller
4534 than a word and loads of that size don't sign extend, we can say
4535 nothing about the high order bits. */
4536 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4537 #ifdef LOAD_EXTEND_OP
4538 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4549 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4550 /* If pointers extend signed and this is a pointer in Pmode, say that
4551 all the bits above ptr_mode are known to be sign bit copies. */
4552 /* As we do not know which address space the pointer is referring to,
4553 we can do this only if the target does not support different pointer
4554 or address modes depending on the address space. */
4555 if (target_default_pointer_address_modes_p ()
4556 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4557 && mode
== Pmode
&& REG_POINTER (x
))
4558 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4562 unsigned int copies_for_hook
= 1, copies
= 1;
4563 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4564 known_mode
, known_ret
,
4568 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4569 known_mode
, known_ret
);
4571 if (copies
> 1 || copies_for_hook
> 1)
4572 return MAX (copies
, copies_for_hook
);
4574 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4579 #ifdef LOAD_EXTEND_OP
4580 /* Some RISC machines sign-extend all loads of smaller than a word. */
4581 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4582 return MAX (1, ((int) bitwidth
4583 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1));
4588 /* If the constant is negative, take its 1's complement and remask.
4589 Then see how many zero bits we have. */
4590 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4591 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4592 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4593 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4595 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4598 /* If this is a SUBREG for a promoted object that is sign-extended
4599 and we are looking at it in a wider mode, we know that at least the
4600 high-order bits are known to be sign bit copies. */
4602 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_SIGNED_P (x
))
4604 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4605 known_x
, known_mode
, known_ret
);
4606 return MAX ((int) bitwidth
4607 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1,
4611 /* For a smaller object, just ignore the high bits. */
4612 if (bitwidth
<= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
))))
4614 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), VOIDmode
,
4615 known_x
, known_mode
, known_ret
);
4616 return MAX (1, (num0
4617 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
)))
4621 #ifdef WORD_REGISTER_OPERATIONS
4622 #ifdef LOAD_EXTEND_OP
4623 /* For paradoxical SUBREGs on machines where all register operations
4624 affect the entire register, just look inside. Note that we are
4625 passing MODE to the recursive call, so the number of sign bit copies
4626 will remain relative to that mode, not the inner mode. */
4628 /* This works only if loads sign extend. Otherwise, if we get a
4629 reload for the inner part, it may be loaded from the stack, and
4630 then we lose all sign bit copies that existed before the store
4633 if (paradoxical_subreg_p (x
)
4634 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
4635 && MEM_P (SUBREG_REG (x
)))
4636 return cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4637 known_x
, known_mode
, known_ret
);
4643 if (CONST_INT_P (XEXP (x
, 1)))
4644 return MAX (1, (int) bitwidth
- INTVAL (XEXP (x
, 1)));
4648 return (bitwidth
- GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4649 + cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4650 known_x
, known_mode
, known_ret
));
4653 /* For a smaller object, just ignore the high bits. */
4654 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4655 known_x
, known_mode
, known_ret
);
4656 return MAX (1, (num0
- (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4660 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4661 known_x
, known_mode
, known_ret
);
4663 case ROTATE
: case ROTATERT
:
4664 /* If we are rotating left by a number of bits less than the number
4665 of sign bit copies, we can just subtract that amount from the
4667 if (CONST_INT_P (XEXP (x
, 1))
4668 && INTVAL (XEXP (x
, 1)) >= 0
4669 && INTVAL (XEXP (x
, 1)) < (int) bitwidth
)
4671 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4672 known_x
, known_mode
, known_ret
);
4673 return MAX (1, num0
- (code
== ROTATE
? INTVAL (XEXP (x
, 1))
4674 : (int) bitwidth
- INTVAL (XEXP (x
, 1))));
4679 /* In general, this subtracts one sign bit copy. But if the value
4680 is known to be positive, the number of sign bit copies is the
4681 same as that of the input. Finally, if the input has just one bit
4682 that might be nonzero, all the bits are copies of the sign bit. */
4683 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4684 known_x
, known_mode
, known_ret
);
4685 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4686 return num0
> 1 ? num0
- 1 : 1;
4688 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4693 && (((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
))
4698 case IOR
: case AND
: case XOR
:
4699 case SMIN
: case SMAX
: case UMIN
: case UMAX
:
4700 /* Logical operations will preserve the number of sign-bit copies.
4701 MIN and MAX operations always return one of the operands. */
4702 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4703 known_x
, known_mode
, known_ret
);
4704 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4705 known_x
, known_mode
, known_ret
);
4707 /* If num1 is clearing some of the top bits then regardless of
4708 the other term, we are guaranteed to have at least that many
4709 high-order zero bits. */
4712 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4713 && CONST_INT_P (XEXP (x
, 1))
4714 && (UINTVAL (XEXP (x
, 1))
4715 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) == 0)
4718 /* Similarly for IOR when setting high-order bits. */
4721 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4722 && CONST_INT_P (XEXP (x
, 1))
4723 && (UINTVAL (XEXP (x
, 1))
4724 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4727 return MIN (num0
, num1
);
4729 case PLUS
: case MINUS
:
4730 /* For addition and subtraction, we can have a 1-bit carry. However,
4731 if we are subtracting 1 from a positive number, there will not
4732 be such a carry. Furthermore, if the positive number is known to
4733 be 0 or 1, we know the result is either -1 or 0. */
4735 if (code
== PLUS
&& XEXP (x
, 1) == constm1_rtx
4736 && bitwidth
<= HOST_BITS_PER_WIDE_INT
)
4738 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4739 if ((((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
) == 0)
4740 return (nonzero
== 1 || nonzero
== 0 ? bitwidth
4741 : bitwidth
- floor_log2 (nonzero
) - 1);
4744 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4745 known_x
, known_mode
, known_ret
);
4746 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4747 known_x
, known_mode
, known_ret
);
4748 result
= MAX (1, MIN (num0
, num1
) - 1);
4753 /* The number of bits of the product is the sum of the number of
4754 bits of both terms. However, unless one of the terms if known
4755 to be positive, we must allow for an additional bit since negating
4756 a negative number can remove one sign bit copy. */
4758 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4759 known_x
, known_mode
, known_ret
);
4760 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4761 known_x
, known_mode
, known_ret
);
4763 result
= bitwidth
- (bitwidth
- num0
) - (bitwidth
- num1
);
4765 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4766 || (((nonzero_bits (XEXP (x
, 0), mode
)
4767 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4768 && ((nonzero_bits (XEXP (x
, 1), mode
)
4769 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)))
4773 return MAX (1, result
);
4776 /* The result must be <= the first operand. If the first operand
4777 has the high bit set, we know nothing about the number of sign
4779 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4781 else if ((nonzero_bits (XEXP (x
, 0), mode
)
4782 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4785 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4786 known_x
, known_mode
, known_ret
);
4789 /* The result must be <= the second operand. If the second operand
4790 has (or just might have) the high bit set, we know nothing about
4791 the number of sign bit copies. */
4792 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4794 else if ((nonzero_bits (XEXP (x
, 1), mode
)
4795 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4798 return cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4799 known_x
, known_mode
, known_ret
);
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }
  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
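
/* Worked example (illustrative, not part of the original source): in
   SImode, (ashiftrt:SI (reg:SI R) (const_int 24)) has at least 25
   sign-bit copies, because the ASHIFTRT rule above adds the shift
   count 24 to the at-least-1 copy known for R.  */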
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
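
/* Usage sketch (illustrative only; SOME_INSN stands for an insn the
   caller already has):

     int cost = insn_rtx_cost (PATTERN (some_insn),
			       optimize_insn_for_speed_p ());
     if (cost == 0)
       ... no single SET was found, so the cost is unknown ...
*/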
/* Return an estimate of the cost of computing sequence SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
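
/* Usage sketch (illustrative): a pass weighing a candidate replacement
   sequence against the insns it would replace might do:

     start_sequence ();
     ... emit candidate insns ...
     rtx_insn *seq = get_insns ();
     end_sequence ();
     if (seq_cost (seq, true) < seq_cost (old_insns, true))
       ... prefer the new sequence ...

   where OLD_INSNS is a hypothetical chain of existing insns.  */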
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
   it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;
      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;
      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != ((unsigned HOST_WIDE_INT) 1
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
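
/* Worked example (illustrative, not part of the original source): by
   rule (4) above, a condition (le (reg:SI R) (const_int 4)) is
   canonicalized to (lt (reg:SI R) (const_int 5)), so callers only ever
   see the strict form of an ordered comparison against a constant.  */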
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
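
/* Usage sketch (illustrative only; JUMP is a hypothetical conditional
   jump insn):

     rtx_insn *earliest;
     rtx cond = get_condition (jump, &earliest, 0, 0);
     if (cond)
       ... GET_CODE (cond) is the comparison under which JUMP branches
	   to its JUMP_LABEL; a constant operand, if any, is in
	   XEXP (cond, 1) ...
*/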
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
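
/* Illustrative example (not part of the original source): on a target
   where TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND,
   num_sign_bit_copies_in_rep[DImode][SImode] is 32, so a DImode value
   with at least 33 sign-bit copies already satisfies the SImode
   representation and truncated_to_mode (SImode, x) returns true.  */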
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  unsigned int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
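
/* Illustrative examples (not part of the original source):
   low_bitmask_len (SImode, 0xff) is 8, since 0xff + 1 is a power of
   two; low_bitmask_len (SImode, 0xf0) is -1, since 0xf0 does not
   select a field starting at bit 0.  */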
/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
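
/* Usage sketch (illustrative only; VAL is a hypothetical CONST_INT
   holding a double-word integer):

     rtx first_word, second_word;
     split_double (val, &first_word, &second_word);
     ... first_word is the word that comes first in target memory,
	 which on a big-endian target is the most significant half ...
*/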
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside AND is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	{
	  if (outer_code)
	    *outer_code = code;
	  return loc;
	}
    }
}
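
/* Illustrative example (not part of the original source): for
   (zero_extend:DI (and:SI (reg:SI R) (const_int -16))),
   strip_address_mutations returns a pointer to the inner (reg:SI R),
   stripping both the pointer-size extension and the alignment AND.  */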
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}
/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}
/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}
/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
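
/* Illustrative example (not part of the original source):
   (pre_modify (reg:SI sp) (plus:SI (reg:SI sp) (const_int -16)))
   decomposes with the stack pointer as the base (base_term2 pointing
   at its copy inside the PLUS) and -16 as the constant displacement.  */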
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
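
/* Usage sketch (illustrative only; MEM_RTX is a hypothetical MEM):

     struct address_info info;
     decompose_mem_address (&info, mem_rtx);
     if (info.base)
       ... *info.base_term is the base register ...
     if (info.disp)
       ... *info.disp_term is the constant displacement ...
*/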
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
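
/* Illustrative examples (not part of the original source): an index of
   (mult (reg R) (const_int 4)) has scale 4; (ashift (reg R)
   (const_int 2)) has scale 1 << 2 == 4; a bare (reg R) has scale 1.  */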
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}
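
/* Usage sketch (illustrative): address legitimization code typically
   special-cases TLS references, e.g.

     if (tls_referenced_p (addr))
       ... take the TLS legitimization path for ADDR ...
*/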