/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "basic-block.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
                                                   const_rtx, machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
                                             const_rtx, machine_mode,
                                             unsigned HOST_WIDE_INT);

static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
                                                machine_mode, unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, machine_mode, const_rtx,
                                          machine_mode, unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
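/* As a rough illustration of the idea above: if SOURCE is 32-bit SImode
   and DESTINATION is 8-bit QImode, a value can be narrowed while keeping
   a sign-extended QImode representation only when its top 32 - 8 + 1 = 25
   bits are all copies of the sign bit, so the table entry would demand
   25 sign-bit copies.  (Illustrative numbers, not taken from the table
   initialization code.)  */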
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  size_t i, value_type x)
  if (base == array.stack)
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
         heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
        vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;

  unsigned int length = array.heap->length ();
  gcc_checking_assert (base == array.heap->address ());
  gcc_checking_assert (i == length);
  vec_safe_push (array.heap, x);
  return array.heap->address ();
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    size_t end, rtx_type x)
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base = add_single_to_queue (array, base, end++, subx);
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base = add_single_to_queue (array, base, end++, subx);
      else if (format[i] == 'E')
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
            for (unsigned int j = 0; j < length; j++)
                typename T::rtx_type x = T::get_rtx (base[j]);
                base[j] = T::get_value (PATTERN (x));
  return end - orig_end;
template <typename T>
generic_subrtx_iterator <T>::free_array (array_type &array)
  vec_free (array.heap);

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
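/* A typical way to walk all subrtxes with these iterators (the same idiom
   used by several helpers later in this file) is, roughly:

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, NONCONST)
       if (REG_P (*iter))
         ...

   This is only an illustrative sketch of the usage pattern.  */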
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

rtx_unstable_p (const_rtx x)
  const RTX_CODE code = GET_CODE (x);

      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)

      if (MEM_VOLATILE_P (x))

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (rtx_unstable_p (XEXP (x, i)))
    else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

rtx_varies_p (const_rtx x, bool for_alias)
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))

      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

      if (MEM_VOLATILE_P (x))

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (rtx_varies_p (XEXP (x, i), for_alias))
    else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       machine_mode mode, bool unaligned_mems)
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;

      if (actual_offset % GET_MODE_SIZE (mode) != 0)

      if (SYMBOL_REF_WEAK (x))
      if (!CONSTANT_POOL_ADDRESS_P (x))
          HOST_WIDE_INT decl_size;
          size = GET_MODE_SIZE (mode);

          /* If the size of the access or of the symbol is unknown,
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);

      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx)
          HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
          size = GET_MODE_SIZE (mode);
          if (FRAME_GROWS_DOWNWARD)
              if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
              if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)

      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))

      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)

      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))

      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

  /* If it isn't one of the cases above, it can cause a trap.  */
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

rtx_addr_can_trap_p (const_rtx x)
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
/* Return true if X is an address that is known to not be zero.  */

nonzero_address_p (const_rtx x)
  const enum rtx_code code = GET_CODE (x);

      return !SYMBOL_REF_WEAK (x);

      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)

      return nonzero_address_p (XEXP (x, 0));

      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))

      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
      return nonzero_address_p (XEXP (x, 0));

      /* Similarly.  Further, the offset is always positive.  */
      return nonzero_address_p (XEXP (x, 0));

      return nonzero_address_p (XEXP (x, 1));

  /* If it isn't one of the cases above, might be zero.  */
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

rtx_addr_varies_p (const_rtx x, bool for_alias)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (rtx_addr_varies_p (XEXP (x, i), for_alias))
    else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
/* Return the CALL in X if there is one.  */

get_call_rtx_from (rtx x)
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
/* Return the value of the integer term in X, if one is apparent;
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

get_integer_term (const_rtx x)
  if (GET_CODE (x) == CONST)

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));

/* If X is a constant, return the value sans apparent integer term;
   Only obvious integer terms are detected.  */

get_related_value (const_rtx x)
  if (GET_CODE (x) != CONST)

  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
  if (GET_CODE (symbol) != SYMBOL_REF)

  if (CONSTANT_POOL_ADDRESS_P (symbol)
      && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))

  decl = SYMBOL_REF_DECL (symbol);
  if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

split_const (rtx x, rtx *base_out, rtx *offset_out)
  if (GET_CODE (x) == CONST)
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);

  *offset_out = const0_rtx;
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

count_occurrences (const_rtx x, const_rtx find, int count_dest)
  const char *format_ptr;

      count = count_occurrences (XEXP (x, 0), find, count_dest);
      count += count_occurrences (XEXP (x, 1), find, count_dest);

      if (MEM_P (find) && rtx_equal_p (x, find))

      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);

  format_ptr = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
      switch (*format_ptr++)
          count += count_occurrences (XEXP (x, i), find, count_dest);
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

unsigned_reg_p (rtx op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

reg_mentioned_p (const_rtx reg, const_rtx in)
  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);
      /* Compare registers by number.  */
      return REG_P (reg) && REGNO (in) == REGNO (reg);

    /* These codes have no constituent expressions
    /* These are kept unique for a given value.  */

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
  if (from_insn == to_insn)

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

reg_referenced_p (const_rtx x, const_rtx body)
  switch (GET_CODE (body))
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))

      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))

      return reg_overlap_mentioned_p (x, body);

      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))

      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))

      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))

      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
      return reg_referenced_p (x, COND_EXEC_CODE (body));
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
  const rtx_insn *insn;

  if (from_insn == to_insn)

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))

/* Internals of reg_set_between_p.  */

reg_set_p (const_rtx reg, const_rtx insn)
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
        if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
      && (FIND_REG_INC_NOTE (insn, reg)
              && REGNO (reg) < FIRST_PSEUDO_REGISTER
              && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                          GET_MODE (reg), REGNO (reg)))
              || find_reg_fusage (insn, CLOBBER, reg)))))

  return set_of (reg, insn) != NULL_RTX;
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
  const enum rtx_code code = GET_CODE (x);

      if (modified_between_p (XEXP (x, 0), start, end))
      if (MEM_READONLY_P (x))
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))

      return reg_set_between_p (x, start, end);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

modified_in_p (const_rtx x, const_rtx insn)
  const enum rtx_code code = GET_CODE (x);

      if (modified_in_p (XEXP (x, 0), insn))
      if (MEM_READONLY_P (x))
      if (memory_modified_in_insn_p (x, insn))

      return reg_set_p (x, insn);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
/* Helper function for set_of.  */

set_of_1 (rtx x, const_rtx pat, void *data1)
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */

set_of (const_rtx pat, const_rtx insn)
  struct set_of_data data;
  data.found = NULL_RTX;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
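/* An illustrative use of set_of: a caller that wants to know whether INSN
   overwrites REG in any way can test, for example,

     if (set_of (reg, insn) != NULL_RTX)
       ...

   which is exactly the test that reg_set_p above reduces to.  */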
/* Add all hard registers in X to *PSET.  */

find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to

record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */

find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);

/* Like record_hard_reg_sets, but called through note_uses.  */

record_hard_reg_uses (rtx *px, void *data)
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SET whose output
   will not be used, which we ignore.  */

single_set_2 (const rtx_insn *insn, const_rtx pat)
  int set_verified = 1;

  if (GET_CODE (pat) == PARALLEL)
      for (i = 0; i < XVECLEN (pat, 0); i++)
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                  && !side_effects_p (set))

                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
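/* Callers normally reach the function above through the single_set
   wrapper declared in rtl.h.  A sketch of the usual idiom:

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       ...

   (illustrative only; compare find_constant_src further below).  */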
/* Given an INSN, return nonzero if it has more than one SET, else return

multiple_sets (const_rtx insn)
  /* INSN must be an insn.  */
  if (! INSN_P (insn))

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
            /* If we have already found a SET, then return now.  */

  /* Either zero or one SET.  */
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

set_noop_p (const_rtx set)
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);

  /* It is a NOOP if destination overlaps with selected src vector
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)

             simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                                    offset, GET_MODE (dst)) == (int) REGNO (dst);

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
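/* For example, a SET such as (set (reg:SI 3) (reg:SI 3)) satisfies the
   final test above, while (set (mem:SI (reg:SI 3)) (mem:SI (reg:SI 3)))
   is only a no-op when the MEM has no side effects.  (Illustrative
   register numbers.)  */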
/* Return nonzero if an insn consists only of SETs, each of which only sets a

noop_move_p (const rtx_insn *insn)
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))

  if (GET_CODE (pat) == PARALLEL)
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
  unsigned int x_regno;

  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */

  code = GET_CODE (x);
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
           || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
               && x_regno == ARG_POINTER_REGNUM)
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)

      return endregno > x_regno && regno < END_REGNO (x);

      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;

      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))

      if (code == CLOBBER || loc == &SET_SRC (x))

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
          if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
      else if (fmt[i] == 'E')
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

reg_overlap_mentioned_p (const_rtx x, const_rtx in)
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))

  switch (GET_CODE (x))
    case STRICT_LOW_PART:
      /* Overly conservative.  */

      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);

      endregno = END_REGNO (x);
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
            if (reg_overlap_mentioned_p (x, XEXP (in, i)))
          else if (fmt[i] == 'E')
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))

      return reg_mentioned_p (x, in);

      /* If any register in here refers to it we return true.  */
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (x, 0, i), 0) != 0
            && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))

      gcc_assert (CONSTANT_P (x));
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        (*fun) (dest, x, data);

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
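/* A sketch of how note_stores is typically used (compare
   record_hard_reg_sets above): the caller supplies a callback such as

     static void
     my_note_store (rtx dest, const_rtx pat ATTRIBUTE_UNUSED, void *data)
     {
       ...
     }

   and then calls note_stores (PATTERN (insn), my_note_store, &my_data).
   The callback and data names here are hypothetical.  */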
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
  switch (GET_CODE (body))
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);

      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);

      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);

      (*fun) (&XEXP (body, 0), data);

      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);

      (*fun) (&TRAP_CONDITION (body), data);

      (*fun) (&XEXP (body, 0), data);

    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);

      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);

        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

          (*fun) (&XEXP (dest, 0), data);

      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced

dead_or_set_p (const_rtx insn, const_rtx x)
  unsigned int regno, end_regno;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)

  gcc_assert (REG_P (x));

  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

covers_regno_p (const_rtx dest, unsigned int test_regno)
  if (GET_CODE (dest) == PARALLEL)
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))

    return covers_regno_no_parallel_p (dest, test_regno);
/* Utility function for dead_or_set_p to check an individual register.  */

dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))

      && find_regno_fusage (insn, CLOBBER, test_regno))

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))

      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
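/* Typical uses of find_reg_note, as seen elsewhere in this file: passing
   NULL_RTX as DATUM asks only whether any note of the given kind exists,
   e.g.

     if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
       ...

   whereas passing a specific DATUM looks for that exact note.  */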
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and

find_reg_equal_equiv_note (const_rtx insn)
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise

find_constant_src (const rtx_insn *insn)
  set = single_set (insn);
      x = avoid_constant_pool_reference (SET_SRC (set));

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))

      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
          unsigned int end_regno = END_REGNO (datum);

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))

/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_REGNO (reg) > regno)
/* Return true if KIND is an integer REG_NOTE.  */

int_reg_note_p (enum reg_note kind)
  return kind == REG_BR_PROB;

/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
  gcc_checking_assert (!int_reg_note_p (kind));
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);

      note = alloc_EXPR_LIST (kind, datum, list);
/* Add register note with kind KIND and datum DATUM to INSN.  */

add_reg_note (rtx insn, enum reg_note kind, rtx datum)
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));

/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

add_int_reg_note (rtx insn, enum reg_note kind, int datum)
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
                                       datum, REG_NOTES (insn));
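/* For instance, a caller recording an equivalence would use
   add_reg_note (insn, REG_EQUAL, x), while branch probabilities, the only
   integer note kind accepted by int_reg_note_p above, go through
   add_int_reg_note (insn, REG_BR_PROB, prob).  (Illustrative arguments.)  */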
/* Add a register note like NOTE to INSN.  */

add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));

/* Remove register note NOTE from the REG_NOTES of INSN.  */

remove_note (rtx insn, const_rtx note)
  if (note == NULL_RTX)

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
          XEXP (link, 1) = XEXP (note, 1);

  switch (REG_NOTE_KIND (note))
      df_notes_rescan (as_a <rtx_insn *> (insn));
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

remove_reg_equal_equiv_notes (rtx_insn *insn)
  loc = &REG_NOTES (insn);
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
        loc = &XEXP (*loc, 1);

/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
      remove_note (insn, note);
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if

in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
  rtx_expr_list *temp = *listp;
  rtx_expr_list *prev = NULL;

      if (node == temp->element ())
          /* Splice the node out of the list.  */
            XEXP (prev, 1) = temp->next ();
            *listp = temp->next ();

      temp = temp->next ();

/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
  rtx_insn_list *temp = *listp;
  rtx_insn_list *prev = NULL;

      if (node == temp->insn ())
          /* Splice the node out of the list.  */
            XEXP (prev, 1) = temp->next ();
            *listp = temp->next ();

      temp = temp->next ();
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state instructions, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

volatile_insn_p (const_rtx x)
  const RTX_CODE code = GET_CODE (x);
    case UNSPEC_VOLATILE:

      if (MEM_VOLATILE_P (x))

  /* Recursively scan the operands of this expression.  */
    const char *const fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          if (volatile_insn_p (XEXP (x, i)))
        else if (fmt[i] == 'E')
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))

/* Nonzero if X contains any volatile memory references
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

volatile_refs_p (const_rtx x)
  const RTX_CODE code = GET_CODE (x);
    case UNSPEC_VOLATILE:

      if (MEM_VOLATILE_P (x))

  /* Recursively scan the operands of this expression.  */
    const char *const fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          if (volatile_refs_p (XEXP (x, i)))
        else if (fmt[i] == 'E')
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
/* Similar to above, except that it also rejects register pre- and post-

side_effects_p (const_rtx x)
  const RTX_CODE code = GET_CODE (x);

      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case UNSPEC_VOLATILE:

      if (MEM_VOLATILE_P (x))

  /* Recursively scan the operands of this expression.  */
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          if (side_effects_p (XEXP (x, i)))
        else if (fmt[i] == 'E')
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

may_trap_p_1 (const_rtx x, unsigned flags)
  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  code = GET_CODE (x);

      /* Handle these cases quickly.  */

      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          || !MEM_NOTRAP_P (x))
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);

      /* Division by a non-constant might trap.  */
      if (HONOR_SNANS (x))
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))

      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */

      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      /* But often the compare has some CC mode, so check operand
      if (HONOR_NANS (XEXP (x, 0))
          || HONOR_NANS (XEXP (x, 1)))

      if (HONOR_SNANS (x))
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (XEXP (x, 0))
          || HONOR_SNANS (XEXP (x, 1)))

      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))

      /* These operations don't trap even with floating point.  */

      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          if (may_trap_p_1 (XEXP (x, i), flags))
      else if (fmt[i] == 'E')
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
/* Return nonzero if evaluating rtx X might cause a trap.  */

may_trap_p (const_rtx x)
  return may_trap_p_1 (x, 0);
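/* A common use of may_trap_p is to guard code motion: a pass will
   typically only speculate or hoist an insn when
   may_trap_p (PATTERN (insn)) is false.  (Sketch of typical usage.)  */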
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      (set (reg:SI) (mem:SI (%fp - 7)))

      (set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

may_trap_or_fault_p (const_rtx x)
  return may_trap_p_1 (x, 1);
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

inequality_comparisons_p (const_rtx x)
  const enum rtx_code code = GET_CODE (x);

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
          if (inequality_comparisons_p (XEXP (x, i)))
      else if (fmt[i] == 'E')
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_subreg (GET_MODE (x), new_rtx,
			       GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
	  gcc_assert (x);
	}
      else
	SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
	{
	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					new_rtx, GET_MODE (XEXP (x, 0)));
	  gcc_assert (x);
	}
      else
	XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
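
/* Illustrative sketch (not from the original sources): replace_rtx is
   typically used to substitute one operand for another in a pattern the
   caller owns outright, e.g. when rewriting a copied instruction to use a
   different register.  Because no copying is done, shared RTL must be
   copied first.  The helper name below is hypothetical.

      static rtx
      substitute_reg_in_copy (rtx pattern, rtx old_reg, rtx new_reg)
      {
	rtx copy = copy_rtx (pattern);
	return replace_rtx (copy, old_reg, new_reg);
      }
 */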
2796 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
2797 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
2800 replace_label (rtx
*loc
, rtx old_label
, rtx new_label
, bool update_label_nuses
)
2802 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
2804 if (JUMP_TABLE_DATA_P (x
))
2807 rtvec vec
= XVEC (x
, GET_CODE (x
) == ADDR_DIFF_VEC
);
2808 int len
= GET_NUM_ELEM (vec
);
2809 for (int i
= 0; i
< len
; ++i
)
2811 rtx ref
= RTVEC_ELT (vec
, i
);
2812 if (XEXP (ref
, 0) == old_label
)
2814 XEXP (ref
, 0) = new_label
;
2815 if (update_label_nuses
)
2817 ++LABEL_NUSES (new_label
);
2818 --LABEL_NUSES (old_label
);
2825 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2826 field. This is not handled by the iterator because it doesn't
2827 handle unprinted ('0') fields. */
2828 if (JUMP_P (x
) && JUMP_LABEL (x
) == old_label
)
2829 JUMP_LABEL (x
) = new_label
;
2831 subrtx_ptr_iterator::array_type array
;
2832 FOR_EACH_SUBRTX_PTR (iter
, array
, loc
, ALL
)
2837 if (GET_CODE (x
) == SYMBOL_REF
2838 && CONSTANT_POOL_ADDRESS_P (x
))
2840 rtx c
= get_pool_constant (x
);
2841 if (rtx_referenced_p (old_label
, c
))
2843 /* Create a copy of constant C; replace the label inside
2844 but do not update LABEL_NUSES because uses in constant pool
2846 rtx new_c
= copy_rtx (c
);
2847 replace_label (&new_c
, old_label
, new_label
, false);
2849 /* Add the new constant NEW_C to constant pool and replace
2850 the old reference to constant by new reference. */
2851 rtx new_mem
= force_const_mem (get_pool_mode (x
), new_c
);
2852 *loc
= replace_rtx (x
, x
, XEXP (new_mem
, 0));
2856 if ((GET_CODE (x
) == LABEL_REF
2857 || GET_CODE (x
) == INSN_LIST
)
2858 && XEXP (x
, 0) == old_label
)
2860 XEXP (x
, 0) = new_label
;
2861 if (update_label_nuses
)
2863 ++LABEL_NUSES (new_label
);
2864 --LABEL_NUSES (old_label
);
2872 replace_label_in_insn (rtx_insn
*insn
, rtx old_label
, rtx new_label
,
2873 bool update_label_nuses
)
2875 rtx insn_as_rtx
= insn
;
2876 replace_label (&insn_as_rtx
, old_label
, new_label
, update_label_nuses
);
2877 gcc_checking_assert (insn_as_rtx
== insn
);
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
	/* Check if a label_ref Y refers to label X.  */
	if (GET_CODE (y) == LABEL_REF
	    && LABEL_P (x)
	    && LABEL_REF_LABEL (y) == x)
	  return true;

	if (rtx_equal_p (x, y))
	  return true;

	/* If Y is a reference to pool constant traverse the constant.  */
	if (GET_CODE (y) == SYMBOL_REF
	    && CONSTANT_POOL_ADDRESS_P (y))
	  iter.substitute (get_pool_constant (y));
      }
  return false;
}
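
/* Illustrative sketch (not from the original sources): rtx_referenced_p is
   handy for asking whether a label is still mentioned by an instruction
   before deleting the label.  The helper name below is hypothetical.

      static bool
      insn_uses_label_p (rtx_insn *insn, const_rtx label)
      {
	return rtx_referenced_p (label, PATTERN (insn));
      }
 */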
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label;
  rtx_insn *table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
	*labelp = label;
      if (tablep)
	*tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }

  return false;
}
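
/* Illustrative sketch (not from the original sources): a caller that needs
   to walk all targets of a dispatch table can combine tablejump_p with the
   jump table's label vector.  The helper name below is hypothetical.

      static int
      count_tablejump_targets (const rtx_insn *insn)
      {
	rtx_jump_table_data *table;
	if (!tablejump_p (insn, NULL, &table))
	  return 0;
	return GET_NUM_ELEM (table->get_labels ());
      }
 */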
2932 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2933 constant that is not in the constant pool and not in the condition
2934 of an IF_THEN_ELSE. */
2937 computed_jump_p_1 (const_rtx x
)
2939 const enum rtx_code code
= GET_CODE (x
);
2956 return ! (GET_CODE (XEXP (x
, 0)) == SYMBOL_REF
2957 && CONSTANT_POOL_ADDRESS_P (XEXP (x
, 0)));
2960 return (computed_jump_p_1 (XEXP (x
, 1))
2961 || computed_jump_p_1 (XEXP (x
, 2)));
2967 fmt
= GET_RTX_FORMAT (code
);
2968 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
2971 && computed_jump_p_1 (XEXP (x
, i
)))
2974 else if (fmt
[i
] == 'E')
2975 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
2976 if (computed_jump_p_1 (XVECEXP (x
, i
, j
)))
2983 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2985 Tablejumps and casesi insns are not considered indirect jumps;
2986 we can recognize them by a (use (label_ref)). */
2989 computed_jump_p (const rtx_insn
*insn
)
2994 rtx pat
= PATTERN (insn
);
2996 /* If we have a JUMP_LABEL set, we're not a computed jump. */
2997 if (JUMP_LABEL (insn
) != NULL
)
3000 if (GET_CODE (pat
) == PARALLEL
)
3002 int len
= XVECLEN (pat
, 0);
3003 int has_use_labelref
= 0;
3005 for (i
= len
- 1; i
>= 0; i
--)
3006 if (GET_CODE (XVECEXP (pat
, 0, i
)) == USE
3007 && (GET_CODE (XEXP (XVECEXP (pat
, 0, i
), 0))
3010 has_use_labelref
= 1;
3014 if (! has_use_labelref
)
3015 for (i
= len
- 1; i
>= 0; i
--)
3016 if (GET_CODE (XVECEXP (pat
, 0, i
)) == SET
3017 && SET_DEST (XVECEXP (pat
, 0, i
)) == pc_rtx
3018 && computed_jump_p_1 (SET_SRC (XVECEXP (pat
, 0, i
))))
3021 else if (GET_CODE (pat
) == SET
3022 && SET_DEST (pat
) == pc_rtx
3023 && computed_jump_p_1 (SET_SRC (pat
)))
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	int size = GET_MODE_SIZE (GET_MODE (mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}

/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
		  for_each_inc_dec_fn fn,
		  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
	  && MEM_P (mem)
	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	{
	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
	  if (res != 0)
	    return res;
	  iter.skip_subrtxes ();
	}
    }
  return 0;
}
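
/* Illustrative sketch (not from the original sources): a for_each_inc_dec_fn
   callback receives the enclosing MEM, the autoinc expression, the register
   being modified and the two addends described above, plus the DATA
   pointer; returning 0 keeps the traversal going.  The callback and counter
   names below are hypothetical.

      static int
      count_autoinc_1 (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		       rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		       rtx srcoff ATTRIBUTE_UNUSED, void *data)
      {
	int *count = (int *) data;
	(*count)++;
	return 0;
      }

      static int
      count_autoincs (rtx pat)
      {
	int count = 0;
	for_each_inc_dec (pat, count_autoinc_1, &count);
	return count;
      }
 */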
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
3134 /* Return a value indicating whether OP, an operand of a commutative
3135 operation, is preferred as the first or second operand. The more
3136 positive the value, the stronger the preference for being the first
3140 commutative_operand_precedence (rtx op
)
3142 enum rtx_code code
= GET_CODE (op
);
3144 /* Constants always become the second operand. Prefer "nice" constants. */
3145 if (code
== CONST_INT
)
3147 if (code
== CONST_WIDE_INT
)
3149 if (code
== CONST_DOUBLE
)
3151 if (code
== CONST_FIXED
)
3153 op
= avoid_constant_pool_reference (op
);
3154 code
= GET_CODE (op
);
3156 switch (GET_RTX_CLASS (code
))
3159 if (code
== CONST_INT
)
3161 if (code
== CONST_WIDE_INT
)
3163 if (code
== CONST_DOUBLE
)
3165 if (code
== CONST_FIXED
)
3170 /* SUBREGs of objects should come second. */
3171 if (code
== SUBREG
&& OBJECT_P (SUBREG_REG (op
)))
3176 /* Complex expressions should be the first, so decrease priority
3177 of objects. Prefer pointer objects over non pointer objects. */
3178 if ((REG_P (op
) && REG_POINTER (op
))
3179 || (MEM_P (op
) && MEM_POINTER (op
)))
3183 case RTX_COMM_ARITH
:
3184 /* Prefer operands that are themselves commutative to be first.
3185 This helps to make things linear. In particular,
3186 (and (and (reg) (reg)) (not (reg))) is canonical. */
3190 /* If only one operand is a binary expression, it will be the first
3191 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3192 is canonical, although it will usually be further simplified. */
3196 /* Then prefer NEG and NOT. */
3197 if (code
== NEG
|| code
== NOT
)
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

int
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
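
/* Illustrative sketch (not from the original sources): canonicalization of a
   commutative operation simply swaps the operands when the predicate above
   says the second one has higher precedence.  The helper name below is
   hypothetical; RTL constructors and simplifiers are the usual consumers of
   this ordering.

      static void
      canonicalize_commutative_operands (rtx *op0, rtx *op1)
      {
	if (swap_commutative_operands_p (*op0, *op1))
	  {
	    rtx tem = *op0;
	    *op0 = *op1;
	    *op1 = tem;
	  }
      }
 */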
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
3237 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3239 loc_mentioned_in_p (rtx
*loc
, const_rtx in
)
3248 code
= GET_CODE (in
);
3249 fmt
= GET_RTX_FORMAT (code
);
3250 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3254 if (loc
== &XEXP (in
, i
) || loc_mentioned_in_p (loc
, XEXP (in
, i
)))
3257 else if (fmt
[i
] == 'E')
3258 for (j
= XVECLEN (in
, i
) - 1; j
>= 0; j
--)
3259 if (loc
== &XVECEXP (in
, i
, j
)
3260 || loc_mentioned_in_p (loc
, XVECEXP (in
, i
, j
)))
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (machine_mode outer_mode,
	      machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
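
/* Worked example (not from the original sources), assuming 32-bit words
   (UNITS_PER_WORD == 4, BITS_PER_WORD == 32) and a DImode inner register:

      (subreg:SI (reg:DI) 4)

   Little endian (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN == 0):
      word = 4 / 4 = 1, byte = 4 % 4 = 0, so subreg_lsb returns 32;
      the subreg names the high half of the DImode value.

   Big endian (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN == 1):
      word = (8 - (4 + 4)) / 4 = 0, byte = (8 - (4 + 4)) % 4 = 0, so
      subreg_lsb returns 0; byte offset 4 names the low half instead.  */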
3318 /* Fill in information about a subreg of a hard register.
3319 xregno - A regno of an inner hard subreg_reg (or what will become one).
3320 xmode - The mode of xregno.
3321 offset - The byte offset.
3322 ymode - The mode of a top level SUBREG (or what may become one).
3323 info - Pointer to structure to fill in.
3325 Rather than considering one particular inner register (and thus one
3326 particular "outer" register) in isolation, this function really uses
3327 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3328 function does not check whether adding INFO->offset to XREGNO gives
3329 a valid hard register; even if INFO->offset + XREGNO is out of range,
3330 there might be another register of the same type that is in range.
3331 Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
3332 register, since that can depend on things like whether the final
3333 register number is even or odd. Callers that want to check whether
3334 this particular subreg can be replaced by a simple (reg ...) should
3335 use simplify_subreg_regno. */
3338 subreg_get_info (unsigned int xregno
, machine_mode xmode
,
3339 unsigned int offset
, machine_mode ymode
,
3340 struct subreg_info
*info
)
3342 int nregs_xmode
, nregs_ymode
;
3343 int mode_multiple
, nregs_multiple
;
3344 int offset_adj
, y_offset
, y_offset_adj
;
3345 int regsize_xmode
, regsize_ymode
;
3348 gcc_assert (xregno
< FIRST_PSEUDO_REGISTER
);
3352 /* If there are holes in a non-scalar mode in registers, we expect
3353 that it is made up of its units concatenated together. */
3354 if (HARD_REGNO_NREGS_HAS_PADDING (xregno
, xmode
))
3356 machine_mode xmode_unit
;
3358 nregs_xmode
= HARD_REGNO_NREGS_WITH_PADDING (xregno
, xmode
);
3359 if (GET_MODE_INNER (xmode
) == VOIDmode
)
3362 xmode_unit
= GET_MODE_INNER (xmode
);
3363 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno
, xmode_unit
));
3364 gcc_assert (nregs_xmode
3365 == (GET_MODE_NUNITS (xmode
)
3366 * HARD_REGNO_NREGS_WITH_PADDING (xregno
, xmode_unit
)));
3367 gcc_assert (hard_regno_nregs
[xregno
][xmode
]
3368 == (hard_regno_nregs
[xregno
][xmode_unit
]
3369 * GET_MODE_NUNITS (xmode
)));
3371 /* You can only ask for a SUBREG of a value with holes in the middle
3372 if you don't cross the holes. (Such a SUBREG should be done by
3373 picking a different register class, or doing it in memory if
3374 necessary.) An example of a value with holes is XCmode on 32-bit
3375 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3376 3 for each part, but in memory it's two 128-bit parts.
3377 Padding is assumed to be at the end (not necessarily the 'high part')
3379 if ((offset
/ GET_MODE_SIZE (xmode_unit
) + 1
3380 < GET_MODE_NUNITS (xmode
))
3381 && (offset
/ GET_MODE_SIZE (xmode_unit
)
3382 != ((offset
+ GET_MODE_SIZE (ymode
) - 1)
3383 / GET_MODE_SIZE (xmode_unit
))))
3385 info
->representable_p
= false;
3390 nregs_xmode
= hard_regno_nregs
[xregno
][xmode
];
3392 nregs_ymode
= hard_regno_nregs
[xregno
][ymode
];
3394 /* Paradoxical subregs are otherwise valid. */
3397 && GET_MODE_PRECISION (ymode
) > GET_MODE_PRECISION (xmode
))
3399 info
->representable_p
= true;
3400 /* If this is a big endian paradoxical subreg, which uses more
3401 actual hard registers than the original register, we must
3402 return a negative offset so that we find the proper highpart
3404 if (GET_MODE_SIZE (ymode
) > UNITS_PER_WORD
3405 ? REG_WORDS_BIG_ENDIAN
: BYTES_BIG_ENDIAN
)
3406 info
->offset
= nregs_xmode
- nregs_ymode
;
3409 info
->nregs
= nregs_ymode
;
3413 /* If registers store different numbers of bits in the different
3414 modes, we cannot generally form this subreg. */
3415 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno
, xmode
)
3416 && !HARD_REGNO_NREGS_HAS_PADDING (xregno
, ymode
)
3417 && (GET_MODE_SIZE (xmode
) % nregs_xmode
) == 0
3418 && (GET_MODE_SIZE (ymode
) % nregs_ymode
) == 0)
3420 regsize_xmode
= GET_MODE_SIZE (xmode
) / nregs_xmode
;
3421 regsize_ymode
= GET_MODE_SIZE (ymode
) / nregs_ymode
;
3422 if (!rknown
&& regsize_xmode
> regsize_ymode
&& nregs_ymode
> 1)
3424 info
->representable_p
= false;
3426 = (GET_MODE_SIZE (ymode
) + regsize_xmode
- 1) / regsize_xmode
;
3427 info
->offset
= offset
/ regsize_xmode
;
3430 if (!rknown
&& regsize_ymode
> regsize_xmode
&& nregs_xmode
> 1)
3432 info
->representable_p
= false;
3434 = (GET_MODE_SIZE (ymode
) + regsize_xmode
- 1) / regsize_xmode
;
3435 info
->offset
= offset
/ regsize_xmode
;
3438 /* Quick exit for the simple and common case of extracting whole
3439 subregisters from a multiregister value. */
3440 /* ??? It would be better to integrate this into the code below,
3441 if we can generalize the concept enough and figure out how
3442 odd-sized modes can coexist with the other weird cases we support. */
3444 && WORDS_BIG_ENDIAN
== REG_WORDS_BIG_ENDIAN
3445 && regsize_xmode
== regsize_ymode
3446 && (offset
% regsize_ymode
) == 0)
3448 info
->representable_p
= true;
3449 info
->nregs
= nregs_ymode
;
3450 info
->offset
= offset
/ regsize_ymode
;
3451 gcc_assert (info
->offset
+ info
->nregs
<= nregs_xmode
);
3456 /* Lowpart subregs are otherwise valid. */
3457 if (!rknown
&& offset
== subreg_lowpart_offset (ymode
, xmode
))
3459 info
->representable_p
= true;
3462 if (offset
== 0 || nregs_xmode
== nregs_ymode
)
3465 info
->nregs
= nregs_ymode
;
3470 /* This should always pass, otherwise we don't know how to verify
3471 the constraint. These conditions may be relaxed but
3472 subreg_regno_offset would need to be redesigned. */
3473 gcc_assert ((GET_MODE_SIZE (xmode
) % GET_MODE_SIZE (ymode
)) == 0);
3474 gcc_assert ((nregs_xmode
% nregs_ymode
) == 0);
3476 if (WORDS_BIG_ENDIAN
!= REG_WORDS_BIG_ENDIAN
3477 && GET_MODE_SIZE (xmode
) > UNITS_PER_WORD
)
3479 HOST_WIDE_INT xsize
= GET_MODE_SIZE (xmode
);
3480 HOST_WIDE_INT ysize
= GET_MODE_SIZE (ymode
);
3481 HOST_WIDE_INT off_low
= offset
& (ysize
- 1);
3482 HOST_WIDE_INT off_high
= offset
& ~(ysize
- 1);
3483 offset
= (xsize
- ysize
- off_high
) | off_low
;
3485 /* The XMODE value can be seen as a vector of NREGS_XMODE
3486 values. The subreg must represent a lowpart of given field.
3487 Compute what field it is. */
3488 offset_adj
= offset
;
3489 offset_adj
-= subreg_lowpart_offset (ymode
,
3490 mode_for_size (GET_MODE_BITSIZE (xmode
)
3494 /* Size of ymode must not be greater than the size of xmode. */
3495 mode_multiple
= GET_MODE_SIZE (xmode
) / GET_MODE_SIZE (ymode
);
3496 gcc_assert (mode_multiple
!= 0);
3498 y_offset
= offset
/ GET_MODE_SIZE (ymode
);
3499 y_offset_adj
= offset_adj
/ GET_MODE_SIZE (ymode
);
3500 nregs_multiple
= nregs_xmode
/ nregs_ymode
;
3502 gcc_assert ((offset_adj
% GET_MODE_SIZE (ymode
)) == 0);
3503 gcc_assert ((mode_multiple
% nregs_multiple
) == 0);
3507 info
->representable_p
= (!(y_offset_adj
% (mode_multiple
/ nregs_multiple
)));
3510 info
->offset
= (y_offset
/ (mode_multiple
/ nregs_multiple
)) * nregs_ymode
;
3511 info
->nregs
= nregs_ymode
;
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, machine_mode xmode,
		     unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
			       unsigned int offset, machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
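
/* Illustrative sketch (not from the original sources): before turning a
   hard-register SUBREG into a plain (reg ...), a caller can ask whether the
   byte offset is even representable for that register, and only then query
   the register offset.  The helper name below is hypothetical.

      static int
      hard_subreg_regno_or_minus_1 (unsigned int regno, machine_mode inner,
				    unsigned int byte, machine_mode outer)
      {
	if (!subreg_offset_representable_p (regno, inner, byte, outer))
	  return -1;
	return (int) (regno + subreg_regno_offset (regno, inner, byte, outer));
      }

   simplify_subreg_regno below performs this check (plus several
   target-specific ones) and is what most callers should use.  */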
3545 /* Return the number of a YMODE register to which
3547 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3549 can be simplified. Return -1 if the subreg can't be simplified.
3551 XREGNO is a hard register number. */
3554 simplify_subreg_regno (unsigned int xregno
, machine_mode xmode
,
3555 unsigned int offset
, machine_mode ymode
)
3557 struct subreg_info info
;
3558 unsigned int yregno
;
3560 #ifdef CANNOT_CHANGE_MODE_CLASS
3561 /* Give the backend a chance to disallow the mode change. */
3562 if (GET_MODE_CLASS (xmode
) != MODE_COMPLEX_INT
3563 && GET_MODE_CLASS (xmode
) != MODE_COMPLEX_FLOAT
3564 && REG_CANNOT_CHANGE_MODE_P (xregno
, xmode
, ymode
)
3565 /* We can use mode change in LRA for some transformations. */
3566 && ! lra_in_progress
)
3570 /* We shouldn't simplify stack-related registers. */
3571 if ((!reload_completed
|| frame_pointer_needed
)
3572 && xregno
== FRAME_POINTER_REGNUM
)
3575 if (FRAME_POINTER_REGNUM
!= ARG_POINTER_REGNUM
3576 && xregno
== ARG_POINTER_REGNUM
)
3579 if (xregno
== STACK_POINTER_REGNUM
3580 /* We should convert hard stack register in LRA if it is
3582 && ! lra_in_progress
)
3585 /* Try to get the register offset. */
3586 subreg_get_info (xregno
, xmode
, offset
, ymode
, &info
);
3587 if (!info
.representable_p
)
3590 /* Make sure that the offsetted register value is in range. */
3591 yregno
= xregno
+ info
.offset
;
3592 if (!HARD_REGISTER_NUM_P (yregno
))
3595 /* See whether (reg:YMODE YREGNO) is valid.
3597 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3598 This is a kludge to work around how complex FP arguments are passed
3599 on IA-64 and should be fixed. See PR target/49226. */
3600 if (!HARD_REGNO_MODE_OK (yregno
, ymode
)
3601 && HARD_REGNO_MODE_OK (xregno
, xmode
))
3604 return (int) yregno
;
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */
unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
3647 struct parms_set_data
3653 /* Helper function for noticing stores to parameter registers. */
3655 parms_set (rtx x
, const_rtx pat ATTRIBUTE_UNUSED
, void *data
)
3657 struct parms_set_data
*const d
= (struct parms_set_data
*) data
;
3658 if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
3659 && TEST_HARD_REG_BIT (d
->regs
, REGNO (x
)))
3661 CLEAR_HARD_REG_BIT (d
->regs
, REGNO (x
));
3666 /* Look backward for first parameter to be loaded.
3667 Note that loads of all parameters will not necessarily be
3668 found if CSE has eliminated some of them (e.g., an argument
3669 to the outer function is passed down as a parameter).
3670 Do not skip BOUNDARY. */
3672 find_first_parameter_load (rtx_insn
*call_insn
, rtx_insn
*boundary
)
3674 struct parms_set_data parm
;
3676 rtx_insn
*before
, *first_set
;
3678 /* Since different machines initialize their parameter registers
3679 in different orders, assume nothing. Collect the set of all
3680 parameter registers. */
3681 CLEAR_HARD_REG_SET (parm
.regs
);
3683 for (p
= CALL_INSN_FUNCTION_USAGE (call_insn
); p
; p
= XEXP (p
, 1))
3684 if (GET_CODE (XEXP (p
, 0)) == USE
3685 && REG_P (XEXP (XEXP (p
, 0), 0)))
3687 gcc_assert (REGNO (XEXP (XEXP (p
, 0), 0)) < FIRST_PSEUDO_REGISTER
);
3689 /* We only care about registers which can hold function
3691 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p
, 0), 0))))
3694 SET_HARD_REG_BIT (parm
.regs
, REGNO (XEXP (XEXP (p
, 0), 0)));
3698 first_set
= call_insn
;
3700 /* Search backward for the first set of a register in this set. */
3701 while (parm
.nregs
&& before
!= boundary
)
3703 before
= PREV_INSN (before
);
3705 /* It is possible that some loads got CSEed from one call to
3706 another. Stop in that case. */
3707 if (CALL_P (before
))
3710 /* Our caller needs either ensure that we will find all sets
3711 (in case code has not been optimized yet), or take care
3712 for possible labels in a way by setting boundary to preceding
3714 if (LABEL_P (before
))
3716 gcc_assert (before
== boundary
);
3720 if (INSN_P (before
))
3722 int nregs_old
= parm
.nregs
;
3723 note_stores (PATTERN (before
), parms_set
, &parm
);
3724 /* If we found something that did not set a parameter reg,
3725 we're done. Do not keep going, as that might result
3726 in hoisting an insn before the setting of a pseudo
3727 that is used by the hoisted insn. */
3728 if (nregs_old
!= parm
.nregs
)
3737 /* Return true if we should avoid inserting code between INSN and preceding
3738 call instruction. */
3741 keep_with_call_p (const rtx_insn
*insn
)
3745 if (INSN_P (insn
) && (set
= single_set (insn
)) != NULL
)
3747 if (REG_P (SET_DEST (set
))
3748 && REGNO (SET_DEST (set
)) < FIRST_PSEUDO_REGISTER
3749 && fixed_regs
[REGNO (SET_DEST (set
))]
3750 && general_operand (SET_SRC (set
), VOIDmode
))
3752 if (REG_P (SET_SRC (set
))
3753 && targetm
.calls
.function_value_regno_p (REGNO (SET_SRC (set
)))
3754 && REG_P (SET_DEST (set
))
3755 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3757 /* There may be a stack pop just after the call and before the store
3758 of the return register. Search for the actual store when deciding
3759 if we can break or not. */
3760 if (SET_DEST (set
) == stack_pointer_rtx
)
3762 /* This CONST_CAST is okay because next_nonnote_insn just
3763 returns its argument and we assign it to a const_rtx
3766 = next_nonnote_insn (const_cast<rtx_insn
*> (insn
));
3767 if (i2
&& keep_with_call_p (i2
))
3774 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3775 to non-complex jumps. That is, direct unconditional, conditional,
3776 and tablejumps, but not computed jumps or returns. It also does
3777 not apply to the fallthru case of a conditional jump. */
3780 label_is_jump_target_p (const_rtx label
, const rtx_insn
*jump_insn
)
3782 rtx tmp
= JUMP_LABEL (jump_insn
);
3783 rtx_jump_table_data
*table
;
3788 if (tablejump_p (jump_insn
, NULL
, &table
))
3790 rtvec vec
= table
->get_labels ();
3791 int i
, veclen
= GET_NUM_ELEM (vec
);
3793 for (i
= 0; i
< veclen
; ++i
)
3794 if (XEXP (RTVEC_ELT (vec
, i
), 0) == label
)
3798 if (find_reg_note (jump_insn
, REG_LABEL_TARGET
, label
))
3805 /* Return an estimate of the cost of computing rtx X.
3806 One use is in cse, to decide which expression to keep in the hash table.
3807 Another is in rtl generation, to pick the cheapest way to multiply.
3808 Other uses like the latter are expected in the future.
3810 X appears as operand OPNO in an expression with code OUTER_CODE.
3811 SPEED specifies whether costs optimized for speed or size should
3815 rtx_cost (rtx x
, enum rtx_code outer_code
, int opno
, bool speed
)
3826 /* A size N times larger than UNITS_PER_WORD likely needs N times as
3827 many insns, taking N times as long. */
3828 factor
= GET_MODE_SIZE (GET_MODE (x
)) / UNITS_PER_WORD
;
3832 /* Compute the default costs of certain things.
3833 Note that targetm.rtx_costs can override the defaults. */
3835 code
= GET_CODE (x
);
3839 /* Multiplication has time-complexity O(N*N), where N is the
3840 number of units (translated from digits) when using
3841 schoolbook long multiplication. */
3842 total
= factor
* factor
* COSTS_N_INSNS (5);
3848 /* Similarly, complexity for schoolbook long division. */
3849 total
= factor
* factor
* COSTS_N_INSNS (7);
3852 /* Used in combine.c as a marker. */
3856 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
3857 the mode for the factor. */
3858 factor
= GET_MODE_SIZE (GET_MODE (SET_DEST (x
))) / UNITS_PER_WORD
;
3863 total
= factor
* COSTS_N_INSNS (1);
3873 /* If we can't tie these modes, make this expensive. The larger
3874 the mode, the more expensive it is. */
3875 if (! MODES_TIEABLE_P (GET_MODE (x
), GET_MODE (SUBREG_REG (x
))))
3876 return COSTS_N_INSNS (2 + factor
);
3880 if (targetm
.rtx_costs (x
, code
, outer_code
, opno
, &total
, speed
))
3885 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3886 which is already in total. */
3888 fmt
= GET_RTX_FORMAT (code
);
3889 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3891 total
+= rtx_cost (XEXP (x
, i
), code
, i
, speed
);
3892 else if (fmt
[i
] == 'E')
3893 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
3894 total
+= rtx_cost (XVECEXP (x
, i
, j
), code
, i
, speed
);
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}
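
/* Illustrative sketch (not from the original sources): full_rtx_costs lets a
   caller compare two candidate expressions on both the speed and size axes
   at once.  The helper name below is hypothetical, and the simple
   speed-then-size tie-break is only an assumption for illustration.

      static bool
      cheaper_rtx_p (rtx a, rtx b, enum rtx_code outer, int opno)
      {
	struct full_rtx_costs ca, cb;
	get_full_rtx_cost (a, outer, opno, &ca);
	get_full_rtx_cost (b, outer, opno, &cb);
	if (ca.speed != cb.speed)
	  return ca.speed < cb.speed;
	return ca.size < cb.size;
      }
 */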
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for the cost of various unusual addresses, such as
     operands of a push instruction.  It is not worthwhile to complicate
     the target hook for such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
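
/* Illustrative sketch (not from the original sources): a typical use of
   nonzero_bits is proving that a value already fits in a narrower unsigned
   range, so that an explicit zero-extension is redundant.  The helper name
   below is hypothetical.

      static bool
      value_fits_in_byte_p (rtx x, machine_mode mode)
      {
	return (nonzero_bits (x, mode) & ~(unsigned HOST_WIDE_INT) 0xff) == 0;
      }

   num_sign_bit_copies plays the analogous role for sign-extensions: if it
   returns at least the number of bits being dropped plus one, truncating
   and then sign-extending the value is a no-op.  */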
3951 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3952 It avoids exponential behavior in nonzero_bits1 when X has
3953 identical subexpressions on the first or the second level. */
3955 static unsigned HOST_WIDE_INT
3956 cached_nonzero_bits (const_rtx x
, machine_mode mode
, const_rtx known_x
,
3957 machine_mode known_mode
,
3958 unsigned HOST_WIDE_INT known_ret
)
3960 if (x
== known_x
&& mode
== known_mode
)
3963 /* Try to find identical subexpressions. If found call
3964 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3965 precomputed value for the subexpression as KNOWN_RET. */
3967 if (ARITHMETIC_P (x
))
3969 rtx x0
= XEXP (x
, 0);
3970 rtx x1
= XEXP (x
, 1);
3972 /* Check the first level. */
3974 return nonzero_bits1 (x
, mode
, x0
, mode
,
3975 cached_nonzero_bits (x0
, mode
, known_x
,
3976 known_mode
, known_ret
));
3978 /* Check the second level. */
3979 if (ARITHMETIC_P (x0
)
3980 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
3981 return nonzero_bits1 (x
, mode
, x1
, mode
,
3982 cached_nonzero_bits (x1
, mode
, known_x
,
3983 known_mode
, known_ret
));
3985 if (ARITHMETIC_P (x1
)
3986 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
3987 return nonzero_bits1 (x
, mode
, x0
, mode
,
3988 cached_nonzero_bits (x0
, mode
, known_x
,
3989 known_mode
, known_ret
));
3992 return nonzero_bits1 (x
, mode
, known_x
, known_mode
, known_ret
);
3995 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3996 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3997 is less useful. We can't allow both, because that results in exponential
3998 run time recursion. There is a nullstone testcase that triggered
3999 this. This macro avoids accidental uses of num_sign_bit_copies. */
4000 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4002 /* Given an expression, X, compute which bits in X can be nonzero.
4003 We don't care about bits outside of those defined in MODE.
4005 For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
4006 an arithmetic operation, we can do better. */
4008 static unsigned HOST_WIDE_INT
4009 nonzero_bits1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4010 machine_mode known_mode
,
4011 unsigned HOST_WIDE_INT known_ret
)
4013 unsigned HOST_WIDE_INT nonzero
= GET_MODE_MASK (mode
);
4014 unsigned HOST_WIDE_INT inner_nz
;
4016 machine_mode inner_mode
;
4017 unsigned int mode_width
= GET_MODE_PRECISION (mode
);
4019 /* For floating-point and vector values, assume all bits are needed. */
4020 if (FLOAT_MODE_P (GET_MODE (x
)) || FLOAT_MODE_P (mode
)
4021 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4024 /* If X is wider than MODE, use its mode instead. */
4025 if (GET_MODE_PRECISION (GET_MODE (x
)) > mode_width
)
4027 mode
= GET_MODE (x
);
4028 nonzero
= GET_MODE_MASK (mode
);
4029 mode_width
= GET_MODE_PRECISION (mode
);
4032 if (mode_width
> HOST_BITS_PER_WIDE_INT
)
4033 /* Our only callers in this case look for single bit values. So
4034 just return the mode mask. Those tests will then be false. */
4037 #ifndef WORD_REGISTER_OPERATIONS
4038 /* If MODE is wider than X, but both are a single word for both the host
4039 and target machines, we can compute this from which bits of the
4040 object might be nonzero in its own mode, taking into account the fact
4041 that on many CISC machines, accessing an object in a wider mode
4042 causes the high-order bits to become undefined. So they are
4043 not known to be zero. */
4045 if (GET_MODE (x
) != VOIDmode
&& GET_MODE (x
) != mode
4046 && GET_MODE_PRECISION (GET_MODE (x
)) <= BITS_PER_WORD
4047 && GET_MODE_PRECISION (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
4048 && GET_MODE_PRECISION (mode
) > GET_MODE_PRECISION (GET_MODE (x
)))
4050 nonzero
&= cached_nonzero_bits (x
, GET_MODE (x
),
4051 known_x
, known_mode
, known_ret
);
4052 nonzero
|= GET_MODE_MASK (mode
) & ~GET_MODE_MASK (GET_MODE (x
));
4057 code
= GET_CODE (x
);
4061 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4062 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4063 all the bits above ptr_mode are known to be zero. */
4064 /* As we do not know which address space the pointer is referring to,
4065 we can do this only if the target does not support different pointer
4066 or address modes depending on the address space. */
4067 if (target_default_pointer_address_modes_p ()
4068 && POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4070 nonzero
&= GET_MODE_MASK (ptr_mode
);
4073 /* Include declared information about alignment of pointers. */
4074 /* ??? We don't properly preserve REG_POINTER changes across
4075 pointer-to-integer casts, so we can't trust it except for
4076 things that we know must be pointers. See execute/960116-1.c. */
4077 if ((x
== stack_pointer_rtx
4078 || x
== frame_pointer_rtx
4079 || x
== arg_pointer_rtx
)
4080 && REGNO_POINTER_ALIGN (REGNO (x
)))
4082 unsigned HOST_WIDE_INT alignment
4083 = REGNO_POINTER_ALIGN (REGNO (x
)) / BITS_PER_UNIT
;
4085 #ifdef PUSH_ROUNDING
4086 /* If PUSH_ROUNDING is defined, it is possible for the
4087 stack to be momentarily aligned only to that amount,
4088 so we pick the least alignment. */
4089 if (x
== stack_pointer_rtx
&& PUSH_ARGS
)
4090 alignment
= MIN ((unsigned HOST_WIDE_INT
) PUSH_ROUNDING (1),
4094 nonzero
&= ~(alignment
- 1);
4098 unsigned HOST_WIDE_INT nonzero_for_hook
= nonzero
;
4099 rtx new_rtx
= rtl_hooks
.reg_nonzero_bits (x
, mode
, known_x
,
4100 known_mode
, known_ret
,
4104 nonzero_for_hook
&= cached_nonzero_bits (new_rtx
, mode
, known_x
,
4105 known_mode
, known_ret
);
4107 return nonzero_for_hook
;
4111 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
4112 /* If X is negative in MODE, sign-extend the value. */
4114 && mode_width
< BITS_PER_WORD
4115 && (UINTVAL (x
) & ((unsigned HOST_WIDE_INT
) 1 << (mode_width
- 1)))
4117 return UINTVAL (x
) | (HOST_WIDE_INT_M1U
<< mode_width
);
4123 #ifdef LOAD_EXTEND_OP
4124 /* In many, if not most, RISC machines, reading a byte from memory
4125 zeros the rest of the register. Noticing that fact saves a lot
4126 of extra zero-extends. */
4127 if (LOAD_EXTEND_OP (GET_MODE (x
)) == ZERO_EXTEND
)
4128 nonzero
&= GET_MODE_MASK (GET_MODE (x
));
4133 case UNEQ
: case LTGT
:
4134 case GT
: case GTU
: case UNGT
:
4135 case LT
: case LTU
: case UNLT
:
4136 case GE
: case GEU
: case UNGE
:
4137 case LE
: case LEU
: case UNLE
:
4138 case UNORDERED
: case ORDERED
:
4139 /* If this produces an integer result, we know which bits are set.
4140 Code here used to clear bits outside the mode of X, but that is
4142 /* Mind that MODE is the mode the caller wants to look at this
4143 operation in, and not the actual operation mode. We can wind
4144 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4145 that describes the results of a vector compare. */
4146 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
4147 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
4148 nonzero
= STORE_FLAG_VALUE
;
4153 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4154 and num_sign_bit_copies. */
4155 if (num_sign_bit_copies (XEXP (x
, 0), GET_MODE (x
))
4156 == GET_MODE_PRECISION (GET_MODE (x
)))
4160 if (GET_MODE_PRECISION (GET_MODE (x
)) < mode_width
)
4161 nonzero
|= (GET_MODE_MASK (mode
) & ~GET_MODE_MASK (GET_MODE (x
)));
4166 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4167 and num_sign_bit_copies. */
4168 if (num_sign_bit_copies (XEXP (x
, 0), GET_MODE (x
))
4169 == GET_MODE_PRECISION (GET_MODE (x
)))
4175 nonzero
&= (cached_nonzero_bits (XEXP (x
, 0), mode
,
4176 known_x
, known_mode
, known_ret
)
4177 & GET_MODE_MASK (mode
));
4181 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4182 known_x
, known_mode
, known_ret
);
4183 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4184 nonzero
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4188 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4189 Otherwise, show all the bits in the outer mode but not the inner
4191 inner_nz
= cached_nonzero_bits (XEXP (x
, 0), mode
,
4192 known_x
, known_mode
, known_ret
);
4193 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
4195 inner_nz
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
4196 if (val_signbit_known_set_p (GET_MODE (XEXP (x
, 0)), inner_nz
))
4197 inner_nz
|= (GET_MODE_MASK (mode
)
4198 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0))));
4201 nonzero
&= inner_nz
;
4205 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
4206 known_x
, known_mode
, known_ret
)
4207 & cached_nonzero_bits (XEXP (x
, 1), mode
,
4208 known_x
, known_mode
, known_ret
);
4212 case UMIN
: case UMAX
: case SMIN
: case SMAX
:
4214 unsigned HOST_WIDE_INT nonzero0
4215 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4216 known_x
, known_mode
, known_ret
);
4218 /* Don't call nonzero_bits for the second time if it cannot change
4220 if ((nonzero
& nonzero0
) != nonzero
)
4222 | cached_nonzero_bits (XEXP (x
, 1), mode
,
4223 known_x
, known_mode
, known_ret
);
4227 case PLUS
: case MINUS
:
4229 case DIV
: case UDIV
:
4230 case MOD
: case UMOD
:
4231 /* We can apply the rules of arithmetic to compute the number of
4232 high- and low-order zero bits of these operations. We start by
4233 computing the width (position of the highest-order nonzero bit)
4234 and the number of low-order zero bits for each value. */
4236 unsigned HOST_WIDE_INT nz0
4237 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4238 known_x
, known_mode
, known_ret
);
4239 unsigned HOST_WIDE_INT nz1
4240 = cached_nonzero_bits (XEXP (x
, 1), mode
,
4241 known_x
, known_mode
, known_ret
);
4242 int sign_index
= GET_MODE_PRECISION (GET_MODE (x
)) - 1;
4243 int width0
= floor_log2 (nz0
) + 1;
4244 int width1
= floor_log2 (nz1
) + 1;
4245 int low0
= floor_log2 (nz0
& -nz0
);
4246 int low1
= floor_log2 (nz1
& -nz1
);
4247 unsigned HOST_WIDE_INT op0_maybe_minusp
4248 = nz0
& ((unsigned HOST_WIDE_INT
) 1 << sign_index
);
4249 unsigned HOST_WIDE_INT op1_maybe_minusp
4250 = nz1
& ((unsigned HOST_WIDE_INT
) 1 << sign_index
);
4251 unsigned int result_width
= mode_width
;
4257 result_width
= MAX (width0
, width1
) + 1;
4258 result_low
= MIN (low0
, low1
);
4261 result_low
= MIN (low0
, low1
);
4264 result_width
= width0
+ width1
;
4265 result_low
= low0
+ low1
;
4270 if (!op0_maybe_minusp
&& !op1_maybe_minusp
)
4271 result_width
= width0
;
4276 result_width
= width0
;
4281 if (!op0_maybe_minusp
&& !op1_maybe_minusp
)
4282 result_width
= MIN (width0
, width1
);
4283 result_low
= MIN (low0
, low1
);
4288 result_width
= MIN (width0
, width1
);
4289 result_low
= MIN (low0
, low1
);
4295 if (result_width
< mode_width
)
4296 nonzero
&= ((unsigned HOST_WIDE_INT
) 1 << result_width
) - 1;
4299 nonzero
&= ~(((unsigned HOST_WIDE_INT
) 1 << result_low
) - 1);
4304 if (CONST_INT_P (XEXP (x
, 1))
4305 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
)
4306 nonzero
&= ((unsigned HOST_WIDE_INT
) 1 << INTVAL (XEXP (x
, 1))) - 1;
4310 /* If this is a SUBREG formed for a promoted variable that has
4311 been zero-extended, we know that at least the high-order bits
4312 are zero, though others might be too. */
4314 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_UNSIGNED_P (x
))
4315 nonzero
= GET_MODE_MASK (GET_MODE (x
))
4316 & cached_nonzero_bits (SUBREG_REG (x
), GET_MODE (x
),
4317 known_x
, known_mode
, known_ret
);
4319 inner_mode
= GET_MODE (SUBREG_REG (x
));
4320 /* If the inner mode is a single word for both the host and target
4321 machines, we can compute this from which bits of the inner
4322 object might be nonzero. */
4323 if (GET_MODE_PRECISION (inner_mode
) <= BITS_PER_WORD
4324 && (GET_MODE_PRECISION (inner_mode
) <= HOST_BITS_PER_WIDE_INT
))
4326 nonzero
&= cached_nonzero_bits (SUBREG_REG (x
), mode
,
4327 known_x
, known_mode
, known_ret
);
4329 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
4330 /* If this is a typical RISC machine, we only have to worry
4331 about the way loads are extended. */
4332 if ((LOAD_EXTEND_OP (inner_mode
) == SIGN_EXTEND
4333 ? val_signbit_known_set_p (inner_mode
, nonzero
)
4334 : LOAD_EXTEND_OP (inner_mode
) != ZERO_EXTEND
)
4335 || !MEM_P (SUBREG_REG (x
)))
4338 /* On many CISC machines, accessing an object in a wider mode
4339 causes the high-order bits to become undefined. So they are
4340 not known to be zero. */
4341 if (GET_MODE_PRECISION (GET_MODE (x
))
4342 > GET_MODE_PRECISION (inner_mode
))
4343 nonzero
|= (GET_MODE_MASK (GET_MODE (x
))
4344 & ~GET_MODE_MASK (inner_mode
));
4353 /* The nonzero bits are in two classes: any bits within MODE
4354 that aren't in GET_MODE (x) are always significant. The rest of the
4355 nonzero bits are those that are significant in the operand of
4356 the shift when shifted the appropriate number of bits. This
4357 shows that high-order bits are cleared by the right shift and
4358 low-order bits by left shifts. */
4359 if (CONST_INT_P (XEXP (x
, 1))
4360 && INTVAL (XEXP (x
, 1)) >= 0
4361 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
4362 && INTVAL (XEXP (x
, 1)) < GET_MODE_PRECISION (GET_MODE (x
)))
4364 machine_mode inner_mode
= GET_MODE (x
);
4365 unsigned int width
= GET_MODE_PRECISION (inner_mode
);
4366 int count
= INTVAL (XEXP (x
, 1));
4367 unsigned HOST_WIDE_INT mode_mask
= GET_MODE_MASK (inner_mode
);
4368 unsigned HOST_WIDE_INT op_nonzero
4369 = cached_nonzero_bits (XEXP (x
, 0), mode
,
4370 known_x
, known_mode
, known_ret
);
4371 unsigned HOST_WIDE_INT inner
= op_nonzero
& mode_mask
;
4372 unsigned HOST_WIDE_INT outer
= 0;
4374 if (mode_width
> width
)
4375 outer
= (op_nonzero
& nonzero
& ~mode_mask
);
4377 if (code
== LSHIFTRT
)
4379 else if (code
== ASHIFTRT
)
4383 /* If the sign bit may have been nonzero before the shift, we
4384 need to mark all the places it could have been copied to
4385 by the shift as possibly nonzero. */
4386 if (inner
& ((unsigned HOST_WIDE_INT
) 1 << (width
- 1 - count
)))
4387 inner
|= (((unsigned HOST_WIDE_INT
) 1 << count
) - 1)
4390 else if (code
== ASHIFT
)
4393 inner
= ((inner
<< (count
% width
)
4394 | (inner
>> (width
- (count
% width
)))) & mode_mask
);
4396 nonzero
&= (outer
| inner
);
4402 /* This is at most the number of bits in the mode. */
4403 nonzero
= ((unsigned HOST_WIDE_INT
) 2 << (floor_log2 (mode_width
))) - 1;
4407 /* If CLZ has a known value at zero, then the nonzero bits are
4408 that value, plus the number of bits in the mode minus one. */
4409 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, nonzero
))
4411 |= ((unsigned HOST_WIDE_INT
) 1 << (floor_log2 (mode_width
))) - 1;
4417 /* If CTZ has a known value at zero, then the nonzero bits are
4418 that value, plus the number of bits in the mode minus one. */
4419 if (CTZ_DEFINED_VALUE_AT_ZERO (mode
, nonzero
))
4421 |= ((unsigned HOST_WIDE_INT
) 1 << (floor_log2 (mode_width
))) - 1;
4427 /* This is at most the number of bits in the mode minus 1. */
4428 nonzero
= ((unsigned HOST_WIDE_INT
) 1 << (floor_log2 (mode_width
))) - 1;
4437 unsigned HOST_WIDE_INT nonzero_true
4438 = cached_nonzero_bits (XEXP (x
, 1), mode
,
4439 known_x
, known_mode
, known_ret
);
4441 /* Don't call nonzero_bits for the second time if it cannot change
4443 if ((nonzero
& nonzero_true
) != nonzero
)
4444 nonzero
&= nonzero_true
4445 | cached_nonzero_bits (XEXP (x
, 2), mode
,
4446 known_x
, known_mode
, known_ret
);
4457 /* See the macro definition above. */
4458 #undef cached_num_sign_bit_copies
4461 /* The function cached_num_sign_bit_copies is a wrapper around
4462 num_sign_bit_copies1. It avoids exponential behavior in
4463 num_sign_bit_copies1 when X has identical subexpressions on the
4464 first or the second level. */
4467 cached_num_sign_bit_copies (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4468 machine_mode known_mode
,
4469 unsigned int known_ret
)
4471 if (x
== known_x
&& mode
== known_mode
)
4474 /* Try to find identical subexpressions. If found call
4475 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4476 the precomputed value for the subexpression as KNOWN_RET. */
4478 if (ARITHMETIC_P (x
))
4480 rtx x0
= XEXP (x
, 0);
4481 rtx x1
= XEXP (x
, 1);
4483 /* Check the first level. */
4486 num_sign_bit_copies1 (x
, mode
, x0
, mode
,
4487 cached_num_sign_bit_copies (x0
, mode
, known_x
,
4491 /* Check the second level. */
4492 if (ARITHMETIC_P (x0
)
4493 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
4495 num_sign_bit_copies1 (x
, mode
, x1
, mode
,
4496 cached_num_sign_bit_copies (x1
, mode
, known_x
,
4500 if (ARITHMETIC_P (x1
)
4501 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
4503 num_sign_bit_copies1 (x
, mode
, x0
, mode
,
4504 cached_num_sign_bit_copies (x0
, mode
, known_x
,
4509 return num_sign_bit_copies1 (x
, mode
, known_x
, known_mode
, known_ret
);
4512 /* Return the number of bits at the high-order end of X that are known to
4513 be equal to the sign bit. X will be used in mode MODE; if MODE is
4514 VOIDmode, X will be used in its own mode. The returned value will always
4515 be between 1 and the number of bits in MODE. */
4518 num_sign_bit_copies1 (const_rtx x
, machine_mode mode
, const_rtx known_x
,
4519 machine_mode known_mode
,
4520 unsigned int known_ret
)
4522 enum rtx_code code
= GET_CODE (x
);
4523 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4524 int num0
, num1
, result
;
4525 unsigned HOST_WIDE_INT nonzero
;
4527 /* If we weren't given a mode, use the mode of X. If the mode is still
4528 VOIDmode, we don't know anything. Likewise if one of the modes is
4531 if (mode
== VOIDmode
)
4532 mode
= GET_MODE (x
);
4534 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4535 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4538 /* For a smaller object, just ignore the high bits. */
4539 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4541 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4542 known_x
, known_mode
, known_ret
);
4544 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4547 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4549 #ifndef WORD_REGISTER_OPERATIONS
4550 /* If this machine does not do all register operations on the entire
4551 register and MODE is wider than the mode of X, we can say nothing
4552 at all about the high-order bits. */
4555 /* Likewise on machines that do, if the mode of the object is smaller
4556 than a word and loads of that size don't sign extend, we can say
4557 nothing about the high order bits. */
4558 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4559 #ifdef LOAD_EXTEND_OP
4560 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4571 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4572 /* If pointers extend signed and this is a pointer in Pmode, say that
4573 all the bits above ptr_mode are known to be sign bit copies. */
4574 /* As we do not know which address space the pointer is referring to,
4575 we can do this only if the target does not support different pointer
4576 or address modes depending on the address space. */
4577 if (target_default_pointer_address_modes_p ()
4578 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4579 && mode
== Pmode
&& REG_POINTER (x
))
4580 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4584 unsigned int copies_for_hook
= 1, copies
= 1;
4585 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4586 known_mode
, known_ret
,
4590 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4591 known_mode
, known_ret
);
4593 if (copies
> 1 || copies_for_hook
> 1)
4594 return MAX (copies
, copies_for_hook
);
4596 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4601 #ifdef LOAD_EXTEND_OP
4602 /* Some RISC machines sign-extend all loads of smaller than a word. */
4603 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4604 return MAX (1, ((int) bitwidth
4605 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1));
4610 /* If the constant is negative, take its 1's complement and remask.
4611 Then see how many zero bits we have. */
4612 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4613 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4614 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4615 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4617 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4620 /* If this is a SUBREG for a promoted object that is sign-extended
4621 and we are looking at it in a wider mode, we know that at least the
4622 high-order bits are known to be sign bit copies. */
4624 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_SIGNED_P (x
))
4626 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4627 known_x
, known_mode
, known_ret
);
4628 return MAX ((int) bitwidth
4629 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1,
4633 /* For a smaller object, just ignore the high bits. */
4634 if (bitwidth
<= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
))))
4636 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), VOIDmode
,
4637 known_x
, known_mode
, known_ret
);
4638 return MAX (1, (num0
4639 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
)))
4643 #ifdef WORD_REGISTER_OPERATIONS
4644 #ifdef LOAD_EXTEND_OP
4645 /* For paradoxical SUBREGs on machines where all register operations
4646 affect the entire register, just look inside. Note that we are
4647 passing MODE to the recursive call, so the number of sign bit copies
4648 will remain relative to that mode, not the inner mode. */
4650 /* This works only if loads sign extend. Otherwise, if we get a
4651 reload for the inner part, it may be loaded from the stack, and
4652 then we lose all sign bit copies that existed before the store
4655 if (paradoxical_subreg_p (x
)
4656 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
4657 && MEM_P (SUBREG_REG (x
)))
4658 return cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4659 known_x
, known_mode
, known_ret
);
4665 if (CONST_INT_P (XEXP (x
, 1)))
4666 return MAX (1, (int) bitwidth
- INTVAL (XEXP (x
, 1)));
4670 return (bitwidth
- GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4671 + cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4672 known_x
, known_mode
, known_ret
));
4675 /* For a smaller object, just ignore the high bits. */
4676 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4677 known_x
, known_mode
, known_ret
);
4678 return MAX (1, (num0
- (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4682 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4683 known_x
, known_mode
, known_ret
);
4685 case ROTATE
: case ROTATERT
:
4686 /* If we are rotating left by a number of bits less than the number
4687 of sign bit copies, we can just subtract that amount from the
4689 if (CONST_INT_P (XEXP (x
, 1))
4690 && INTVAL (XEXP (x
, 1)) >= 0
4691 && INTVAL (XEXP (x
, 1)) < (int) bitwidth
)
4693 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4694 known_x
, known_mode
, known_ret
);
4695 return MAX (1, num0
- (code
== ROTATE
? INTVAL (XEXP (x
, 1))
4696 : (int) bitwidth
- INTVAL (XEXP (x
, 1))));
4701 /* In general, this subtracts one sign bit copy. But if the value
4702 is known to be positive, the number of sign bit copies is the
4703 same as that of the input. Finally, if the input has just one bit
4704 that might be nonzero, all the bits are copies of the sign bit. */
4705 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4706 known_x
, known_mode
, known_ret
);
4707 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4708 return num0
> 1 ? num0
- 1 : 1;
4710 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4715 && (((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
))
4720 case IOR
: case AND
: case XOR
:
4721 case SMIN
: case SMAX
: case UMIN
: case UMAX
:
4722 /* Logical operations will preserve the number of sign-bit copies.
4723 MIN and MAX operations always return one of the operands. */
4724 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4725 known_x
, known_mode
, known_ret
);
4726 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4727 known_x
, known_mode
, known_ret
);
4729 /* If num1 is clearing some of the top bits then regardless of
4730 the other term, we are guaranteed to have at least that many
4731 high-order zero bits. */
4734 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4735 && CONST_INT_P (XEXP (x
, 1))
4736 && (UINTVAL (XEXP (x
, 1))
4737 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) == 0)
4740 /* Similarly for IOR when setting high-order bits. */
4743 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4744 && CONST_INT_P (XEXP (x
, 1))
4745 && (UINTVAL (XEXP (x
, 1))
4746 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4749 return MIN (num0
, num1
);
4751 case PLUS
: case MINUS
:
4752 /* For addition and subtraction, we can have a 1-bit carry. However,
4753 if we are subtracting 1 from a positive number, there will not
4754 be such a carry. Furthermore, if the positive number is known to
4755 be 0 or 1, we know the result is either -1 or 0. */
4757 if (code
== PLUS
&& XEXP (x
, 1) == constm1_rtx
4758 && bitwidth
<= HOST_BITS_PER_WIDE_INT
)
4760 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4761 if ((((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
) == 0)
4762 return (nonzero
== 1 || nonzero
== 0 ? bitwidth
4763 : bitwidth
- floor_log2 (nonzero
) - 1);
4766 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4767 known_x
, known_mode
, known_ret
);
4768 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4769 known_x
, known_mode
, known_ret
);
4770 result
= MAX (1, MIN (num0
, num1
) - 1);
4775 /* The number of bits of the product is the sum of the number of
4776 bits of both terms. However, unless one of the terms if known
4777 to be positive, we must allow for an additional bit since negating
4778 a negative number can remove one sign bit copy. */
4780 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4781 known_x
, known_mode
, known_ret
);
4782 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4783 known_x
, known_mode
, known_ret
);
4785 result
= bitwidth
- (bitwidth
- num0
) - (bitwidth
- num1
);
4787 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4788 || (((nonzero_bits (XEXP (x
, 0), mode
)
4789 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4790 && ((nonzero_bits (XEXP (x
, 1), mode
)
4791 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)))
4795 return MAX (1, result
);
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
         has the high bit set, we know nothing about the number of sign
         bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
                & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);
    case UMOD:
      /* The result must be <= the second operand.  If the second operand
         has (or just might have) the high bit set, we know nothing about
         the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
        return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
                & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        return 1;
      else
        return cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
         the case where the divisor is negative, in which case we have
         to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
        result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                           known_x, known_mode, known_ret);
      if (result > 1
          && (bitwidth > HOST_BITS_PER_WIDE_INT
              || (nonzero_bits (XEXP (x, 1), mode)
                  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
        result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
         sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
          || INTVAL (XEXP (x, 1)) < 0
          || INTVAL (XEXP (x, 1)) >= (int) bitwidth
          || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
        return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
                                         known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
                                         known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
                                         known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
         Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
          && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
        nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
         ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
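
/* Illustrative sketch, not part of the original file: the property computed
   above in action.  For X = (sign_extend:SI (reg:QI R)), every bit above
   bit 7 duplicates the sign bit, so num_sign_bit_copies should report at
   least 25 copies in SImode.  QI_REG and the helper name are hypothetical;
   QI_REG is assumed to be a QImode REG.  */

static void
example_num_sign_bit_copies (rtx qi_reg)
{
  rtx x = gen_rtx_SIGN_EXTEND (SImode, qi_reg);
  gcc_checking_assert (num_sign_bit_copies (x, SImode) >= 25);
}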
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx x = XVECEXP (pat, 0, i);
          if (GET_CODE (x) == SET)
            {
              if (set)
                return 0;
              set = x;
            }
        }
      if (!set)
        return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Returns estimate on cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
        cost += set_rtx_cost (set, speed);
      else
        cost++;
    }

  return cost;
}
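
/* Illustrative sketch, not part of the original file: a pass comparing a
   candidate replacement sequence against the single instruction it would
   replace might combine seq_cost and insn_rtx_cost as below.  The helper
   name and the surrounding policy are hypothetical.  */

static bool
example_sequence_cheaper_p (const rtx_insn *new_seq, rtx_insn *old_insn,
                            bool speed)
{
  unsigned new_cost = seq_cost (new_seq, speed);
  /* insn_rtx_cost returns 0 when the pattern's cost is unknown, so treat
     that case conservatively and keep the old instruction.  */
  int old_cost = insn_rtx_cost (PATTERN (old_insn), speed);
  return old_cost > 0 && new_cost <= (unsigned) old_cost;
}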
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
   it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */
rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
                        rtx_insn **earliest,
                        rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
          || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
         && op1 == CONST0_RTX (GET_MODE (op0))
         && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || !NONJUMP_INSN_P (prev)
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (!REG_P (op0))
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
          || !NONJUMP_INSN_P (prev)
          || FIND_REG_INC_NOTE (prev, NULL_RTX)
          /* In cfglayout mode, there do not have to be labels at the
             beginning of a block, or jumps at the end, so the previous
             conditions would not stop us when we reach bb boundary.  */
          || BLOCK_FOR_INSN (prev) != bb)
        break;

      set = set_of (op0, prev);

      if (set
          && (GET_CODE (set) != SET
              || !rtx_equal_p (SET_DEST (set), op0)))
        break;
      /* If this is setting OP0, get what it sets it to if it looks
         relevant.  */
      if (set)
        {
          machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
          REAL_VALUE_TYPE fsfv;
#endif

          /* ??? We may not combine comparisons done in a CCmode with
             comparisons not done in a CCmode.  This is to aid targets
             like Alpha that have an IEEE compliant EQ instruction, and
             a non-IEEE compliant BEQ instruction.  The use of CCmode is
             actually artificial, simply to prevent the combination, but
             should not affect other platforms.

             However, we must allow VOIDmode comparisons to match either
             CCmode or non-CCmode comparison, because some ports have
             modeless comparisons inside branch patterns.

             ??? This mode check should perhaps look more like the mode check
             in simplify_comparison in combine.  */
          if (((GET_MODE_CLASS (mode) == MODE_CC)
               != (GET_MODE_CLASS (inner_mode) == MODE_CC))
              && mode != VOIDmode
              && inner_mode != VOIDmode)
            break;

          if (GET_CODE (SET_SRC (set)) == COMPARE
              || (((code == NE
                    || (code == LT
                        && val_signbit_known_set_p (inner_mode,
                                                    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                    || (code == LT
                        && SCALAR_FLOAT_MODE_P (inner_mode)
                        && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                            REAL_VALUE_NEGATIVE (fsfv)))
#endif
                    ))
                  && COMPARISON_P (SET_SRC (set))))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && val_signbit_known_set_p (inner_mode,
                                                     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && SCALAR_FLOAT_MODE_P (inner_mode)
                         && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                             REAL_VALUE_NEGATIVE (fsfv)))
#endif
                     ))
                   && COMPARISON_P (SET_SRC (set)))
            {
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else if ((code == EQ || code == NE)
                   && GET_CODE (SET_SRC (set)) == XOR)
            /* Handle sequences like:

               (set op0 (xor X Y))
               ...(eq|ne op0 (const_int 0))...

               in which case:

               (eq op0 (const_int 0)) reduces to (eq X Y)
               (ne op0 (const_int 0)) reduces to (ne X Y)

               This is the form used by MIPS16, for example.  */
            x = SET_SRC (set);
          else
            break;
        }
      else if (reg_set_p (op0, prev))
        /* If this sets OP0, but not directly, we have to give up.  */
        break;

      if (x)
        {
          /* If the caller is expecting the condition to be valid at INSN,
             make sure X doesn't change before INSN.  */
          if (valid_at_insn_p)
            if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
              break;
          if (COMPARISON_P (x))
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reversed_comparison_code (x, prev);
              if (code == UNKNOWN)
                return 0;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
            code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
          break;

        /* When cross-compiling, const_val might be sign-extended from
           BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
        case GE:
          if ((const_val & max_val)
              != ((unsigned HOST_WIDE_INT) 1
                  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
            code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
          break;

        default:
          break;
        }
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare against a CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
               int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
                                 allow_cc_mode, valid_at_insn_p);
}
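
/* Illustrative sketch, not part of the original file: fetching the branch
   condition of a conditional jump in canonical form.  Passing a null
   EARLIEST asks for a condition that is valid at JUMP itself; 0 rejects
   CC-mode compares and 1 requests the VALID_AT_INSN_P check.  The helper
   name is hypothetical.  */

static rtx
example_jump_condition (rtx_insn *jump)
{
  return get_condition (jump, NULL, 0, 1);
}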
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
         mode = GET_MODE_WIDER_MODE (mode))
      {
        machine_mode i;

        /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
           extends to the next widest mode.  */
        gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
                    || GET_MODE_WIDER_MODE (mode) == in_mode);

        /* We are in in_mode.  Count how many bits outside of mode
           have to be copies of the sign-bit.  */
        for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
          {
            machine_mode wider = GET_MODE_WIDER_MODE (i);

            if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
                /* We can only check sign-bit copies starting from the
                   top-bit.  In order to be able to check the bits we
                   have already seen we pretend that subsequent bits
                   have to be sign-bit copies too.  */
                || num_sign_bit_copies_in_rep [in_mode][mode])
              num_sign_bit_copies_in_rep [in_mode][mode]
                += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
          }
      }
}
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
          >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
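
/* Illustrative sketch, not part of the original file: one plausible way a
   caller might use truncated_to_mode when deciding whether an explicit
   truncation to MODE is redundant.  The helper name and the exact guards
   are hypothetical, not the API's documented contract.  */

static bool
example_truncation_redundant_p (machine_mode mode, const_rtx x)
{
  return (GET_MODE (x) != VOIDmode
          && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
          && truncated_to_mode (mode, x));
}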
/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
        /* No subrtxes.  Leave start and count as 0.  */
        return true;
      if (format[i] == 'E' || format[i] == 'V')
        return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}

/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
        rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
        rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
        return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
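
/* Illustrative sketch, not part of the original file: low_bitmask_len
   recognizes masks of the form (1 << N) - 1.  In SImode, 0xff is a
   low-order field of length 8, while 0xf0 is not low-order and yields -1.
   The helper below only records those expectations.  */

static void
example_low_bitmask_len_checks (void)
{
  gcc_checking_assert (low_bitmask_len (SImode, 0xff) == 8);
  gcc_checking_assert (low_bitmask_len (SImode, 0xf0) == -1);
}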
/* Return the mode of MEM's address.  */

machine_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
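
/* Illustrative sketch, not part of the original file: get_address_mode is
   the safe way to ask which mode a MEM's address uses, since a constant
   address may be VOIDmode.  Forcing the address into a register of that
   mode is one hypothetical use; the helper name is made up.  */

static rtx
example_address_as_reg (rtx mem)
{
  return copy_to_mode_reg (get_address_mode (mem), XEXP (mem, 0));
}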
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
        {
          /* In this case the CONST_INT holds both target words.
             Extract the bits from it into two word-sized pieces.
             Sign extend each half to HOST_WIDE_INT.  */
          unsigned HOST_WIDE_INT low, high;
          unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
          unsigned bits_per_word = BITS_PER_WORD;

          /* Set sign_bit to the most significant bit of a word.  */
          sign_bit = 1;
          sign_bit <<= bits_per_word - 1;

          /* Set mask so that all bits of the word are set.  We could
             have used 1 << BITS_PER_WORD instead of basing the
             calculation on sign_bit.  However, on machines where
             HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
             compiler warning, even though the code would never be
             executed.  */
          mask = sign_bit << 1;
          mask--;

          /* Set sign_extend as any remaining bits.  */
          sign_extend = ~mask;

          /* Pick the lower word and sign-extend it.  */
          low = INTVAL (value);
          low &= mask;
          if (low & sign_bit)
            low |= sign_extend;

          /* Pick the higher word, shifted to the least significant
             bits, and sign-extend it.  */
          high = INTVAL (value);
          high >>= bits_per_word - 1;
          high >>= 1;
          high &= mask;
          if (high & sign_bit)
            high |= sign_extend;

          /* Store the words in the target machine order.  */
          if (WORDS_BIG_ENDIAN)
            {
              *first = GEN_INT (high);
              *second = GEN_INT (low);
            }
          else
            {
              *first = GEN_INT (low);
              *second = GEN_INT (high);
            }
        }
      else
        {
          /* The rule for using CONST_INT for a wider mode
             is that we regard the value as signed.
             So sign-extend it.  */
          rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
          if (WORDS_BIG_ENDIAN)
            {
              *first = high;
              *second = value;
            }
          else
            {
              *first = value;
              *second = high;
            }
        }
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
         properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
        {
          *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
          *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
        }
      else
        {
          *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
          *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
        }
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
        {
          *first = const0_rtx;
          *second = value;
        }
      else
        {
          *first = value;
          *second = const0_rtx;
        }
    }
  else if (GET_MODE (value) == VOIDmode
           /* This is the old way we did CONST_DOUBLE integers.  */
           || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
         So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
        {
          *first = GEN_INT (CONST_DOUBLE_HIGH (value));
          *second = GEN_INT (CONST_DOUBLE_LOW (value));
        }
      else
        {
          *first = GEN_INT (CONST_DOUBLE_LOW (value));
          *second = GEN_INT (CONST_DOUBLE_HIGH (value));
        }
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
         format, splits up the floating point double and outputs
         exactly 32 bits of it into each of l[0] and l[1] --
         not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
         then sign-extend on the host so that the number will look the same
         way on the host that it would on the target.  See for instance
         simplify_unary_operation.  The #if is needed to avoid compiler
         warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
        {
          if (l[0] & ((long) 1 << 31))
            l[0] |= ((long) (-1) << 32);
          if (l[1] & ((long) 1 << 31))
            l[1] |= ((long) (-1) << 32);
        }
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
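
/* Illustrative sketch, not part of the original file: splitting a
   double-word constant into its two single-word halves.  VALUE is assumed
   to be a CONST_INT, CONST_WIDE_INT or CONST_DOUBLE spanning two target
   words; the halves come back in target memory order, so which one is
   most significant depends on WORDS_BIG_ENDIAN.  The helper name is
   hypothetical.  */

static void
example_split_double (rtx value)
{
  rtx first_word, second_word;
  split_double (value, &first_word, &second_word);
  /* first_word is what would be stored first in memory, second_word the
     other half.  */
  (void) first_word;
  (void) second_word;
}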
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
        /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
           used to convert between pointer sizes.  */
        loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
        /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
           acts as a combined truncation and extension.  */
        loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
        /* (and ... (const_int -X)) is used to align to X bytes.  */
        loc = &XEXP (*loc, 0);
      else if (code == SUBREG
               && !OBJECT_P (SUBREG_REG (*loc))
               && subreg_lowpart_p (*loc))
        /* (subreg (operator ...) ...) inside an AND is used for mode
           conversion too.  */
        loc = &SUBREG_REG (*loc);
      else
        return loc;
      if (outer_code)
        *outer_code = code;
    }
}
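
/* Illustrative sketch, not part of the original file: peeling the mutations
   off an address such as (and:SI (plus:SI reg disp) (const_int -4)) leaves
   a pointer to the inner PLUS, with the innermost stripped code (here AND)
   reported through the optional second argument.  The helper name is
   hypothetical.  */

static rtx *
example_strip_mutations (rtx *addr_loc)
{
  enum rtx_code stripped_code = UNKNOWN;
  rtx *inner = strip_address_mutations (addr_loc, &stripped_code);
  /* INNER now points at the expression with extensions, truncations,
     alignment ANDs and lowpart SUBREGs removed.  */
  (void) stripped_code;
  return inner;
}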
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
          || code == ASHIFT
          /* Needed by ARM targets.  */
          || code == ASHIFTRT
          || code == LSHIFTRT
          || code == ROTATE
          || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
          enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
         or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
        return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
                                        ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
        set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
        set_address_segment (info, loc, inner);
      else
        {
          /* The only other possibilities are a base or an index.  */
          rtx *base_term = get_base_term (inner);
          rtx *index_term = get_index_term (inner);
          gcc_assert (base_term || index_term);
          if (!base_term)
            set_address_index (info, loc, index_term);
          else if (!index_term)
            set_address_base (info, loc, base_term);
          else
            {
              gcc_assert (base_term == index_term);
              ops[out] = loc;
              inner_ops[out] = base_term;
              ++out;
            }
        }
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
         the base.  If we were confident that another term was the base
         or index, treat the remaining operand as the other kind.  */
      if (!info->base)
        set_address_base (info, ops[0], inner_ops[0]);
      else
        set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
                    GET_CODE (*ops[1]))
          >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
                       GET_CODE (*ops[0])))
        {
          set_address_base (info, ops[0], inner_ops[0]);
          set_address_index (info, ops[1], inner_ops[1]);
        }
      else
        {
          set_address_base (info, ops[1], inner_ops[1]);
          set_address_index (info, ops[0], inner_ops[0]);
        }
    }
  else
    gcc_assert (out == 0);
}
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
                   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
                     MEM_ADDR_SPACE (x), MEM);
}

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
                     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
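
/* Illustrative sketch, not part of the original file: decomposing a MEM's
   address and querying the parts that were found.  For an index such as
   (mult reg (const_int 4)), get_index_scale returns 4.  The helper name
   is hypothetical.  */

static void
example_decompose_mem (rtx mem)
{
  struct address_info info;
  decompose_mem_address (&info, mem);
  if (info.base)
    {
      /* *info.base_term is the register or other object serving as the
         base, with any mutations stripped.  */
    }
  if (info.index)
    {
      HOST_WIDE_INT scale = get_index_scale (&info);
      (void) scale;
    }
}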
/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}
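
/* Illustrative sketch, not part of the original file: a target hook that
   must route thread-local addresses through a dedicated path might gate
   that path on tls_referenced_p.  The helper name is hypothetical.  */

static bool
example_needs_tls_handling (rtx addr)
{
  /* True when ADDR mentions a thread-local SYMBOL_REF anywhere.  */
  return tls_referenced_p (addr);
}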