/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode, const_rtx,
                                                enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);
/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
                                                  value_type *base,
                                                  size_t i, value_type x)
{
  if (base == array.stack)
    {
      /* Add to the stack while there is still space there.  */
      if (i < LOCAL_ELEMS)
        {
          base[i] = x;
          return base;
        }

      /* Otherwise start a new heap vector, and add the stack entries
         to it as well as X.  */
      gcc_checking_assert (i == LOCAL_ELEMS);
      vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }

  /* Add to the heap.  */
  unsigned int length = array.heap->length ();
  gcc_checking_assert (base == array.heap->address ());
  if (length > i)
    {
      base[i] = x;
      return base;
    }
  gcc_checking_assert (i == length);
  vec_safe_push (array.heap, x);
  return array.heap->address ();
}
/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
                                                    value_type *base,
                                                    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
         we're likely to want most.  It also allows for the SEQUENCE
         code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
        if (format[i] == 'e')
          {
            value_type subx = T::get_value (x->u.fld[i].rt_rtx);
            if (__builtin_expect (end < LOCAL_ELEMS, true))
              base[end++] = subx;
            else
              base = add_single_to_queue (array, base, end++, subx);
          }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
        {
          value_type subx = T::get_value (x->u.fld[i].rt_rtx);
          if (__builtin_expect (end < LOCAL_ELEMS, true))
            base[end++] = subx;
          else
            base = add_single_to_queue (array, base, end++, subx);
        }
      else if (format[i] == 'E')
        {
          unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
          rtx *vec = x->u.fld[i].rt_rtvec->elem;
          if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
            for (unsigned int j = 0; j < length; j++)
              base[end++] = T::get_value (vec[j]);
          else
            for (unsigned int j = 0; j < length; j++)
              base = add_single_to_queue (array, base, end++,
                                          T::get_value (vec[j]));
          if (code == SEQUENCE && end == length)
            /* If the subrtxes of the sequence fill the entire array then
               we know that no other parts of a containing insn are queued.
               The caller is therefore iterating over the sequence as a
               PATTERN (...), so we also want the patterns of the
               subinstructions.  */
            for (unsigned int j = 0; j < length; j++)
              {
                typename T::rtx_type x = T::get_rtx (base[j]);
                if (INSN_P (x))
                  base[j] = T::get_value (PATTERN (x));
              }
        }
  return end - orig_end;
}
/* Free ARRAY's heap memory.  */

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;
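
/* A minimal usage sketch for the iterators instantiated above.  The
   variable X stands for any rtx the caller already has; it is not a
   name defined in this file:

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, ALL)
       if (MEM_P (*iter))
         return true;

   This visits X itself and then each of its subrtxes in depth-first
   order, using the worklist machinery defined above.  */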
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
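
/* For example, (mem:SI (reg:SI 100)) is unstable unless the MEM is
   marked MEM_READONLY_P, while (plus:SI (reg/f:SI frame_pointer)
   (const_int 8)) is stable.  (Illustrative RTL only; exact registers
   and modes depend on the target.)  */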
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx)
        {
          HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (FRAME_GROWS_DOWNWARD)
            {
              if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
                return 1;
            }
          else
            {
              if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
                return 1;
            }
          return 0;
        }
      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
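
/* Usage sketch (MEM here stands for a MEM rtx the caller is
   inspecting; not a name from this file):

     if (!rtx_addr_can_trap_p (XEXP (mem, 0)))
       ... the access cannot trap ...

   Callers that know the access size and mode should prefer
   rtx_addr_can_trap_p_1 directly, as may_trap_p_1 does below.  */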
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
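
/* For example, given (const:SI (plus:SI (symbol_ref:SI ("x"))
   (const_int 4))), get_integer_term returns 4; for the corresponding
   MINUS form it returns -4.  (Illustrative RTL only.)  */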
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
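
/* Usage sketch (X is assumed to be an address the caller already has):

     rtx base, offset;
     split_const (x, &base, &offset);

   OFFSET is const0_rtx when X has no apparent integer term, so the
   result is always usable without a null check.  */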
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
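
/* Usage sketch (INSN and REG are assumed to come from the caller):

     int uses = count_occurrences (PATTERN (insn), reg, 0);

   This counts how many times REG is used in INSN's pattern, ignoring
   an occurrence as a plain SET_DEST.  */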
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == LABEL_REF_LABEL (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
                    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
                   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
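
/* Usage sketch (REG, A and B are assumed to be supplied by the caller,
   with A and B insns in the same sequence):

     if (!reg_set_p (reg, a) && !reg_set_between_p (reg, a, b))
       ... REG has the same value at A and immediately before B ...

   Note that reg_set_p is conservative: for call insns it also reports
   hard registers clobbered by the call.  */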
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
        add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}
/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}
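
/* Usage sketch (INSN is assumed to be a post-reload insn supplied by
   the caller; SOME_REG likewise):

     HARD_REG_SET written;
     find_all_hard_reg_sets (insn, &written, true);
     if (TEST_HARD_REG_BIT (written, REGNO (some_reg)))
       ...

   With IMPLICIT true, a call insn also reports all call-used
   registers as written.  */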
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
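
/* Callers normally reach single_set_2 through the single_set wrapper
   (defined in rtl.h), which handles the common non-PARALLEL case
   inline.  A typical use (INSN assumed to come from the caller):

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       ...  */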
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
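
/* For example, (set (reg:SI 1) (reg:SI 1)) is a no-op, and so is
   (set (mem:SI (reg:SI 2)) (mem:SI (reg:SI 2))) as long as the MEM
   has no side effects.  (Illustrative RTL only.)  */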
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
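
/* Usage sketch (INSN assumed to come from the caller):

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       ... XEXP (note, 0) is the equivalent value ...

   Passing NULL_RTX as DATUM finds a note of the given kind regardless
   of its datum.  */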
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const rtx_insn *insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((enum machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (as_a <rtx_insn *> (insn));
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  if (!df)
    return;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx_insn *insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
  rtx_expr_list *temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == temp->element ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
  rtx_insn_list *temp = *listp;
  rtx prev = NULL;

  while (temp)
    {
      if (node == temp->insn ())
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = temp->next ();
          else
            *listp = temp->next ();

          return;
        }

      prev = temp;
      temp = temp->next ();
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero value means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

static int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
          || HONOR_NANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
          || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
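
/* Usage sketch (X assumed to be an rtx from the caller):

     if (!may_trap_p (x))
       ... X can be evaluated speculatively ...

   Use may_trap_or_fault_p below when the expression may also be moved
   to a context with weaker alignment guarantees.  */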
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      ((set (reg:SI) (mem:SI (%fp - 7)))
       (set (reg:QI) (mem:QI (%fp - 7))))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    CASE_CONST_ANY:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
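
/* An illustrative use of replace_rtx (a sketch, not from the original
   sources; FROM_REG is a hypothetical pseudo register): substitute a
   known constant for a pseudo in a copy of an insn pattern.

       rtx pat = copy_rtx (PATTERN (insn));
       pat = replace_rtx (pat, from_reg, GEN_INT (42));

   The copy_rtx call matters because replace_rtx rewrites X destructively
   except where the replacement simplifies the containing expression.  */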
/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
   the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */

void
replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
{
  /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
  rtx x = *loc;
  if (JUMP_TABLE_DATA_P (x))
    {
      x = PATTERN (x);
      rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
      int len = GET_NUM_ELEM (vec);
      for (int i = 0; i < len; ++i)
        {
          rtx ref = RTVEC_ELT (vec, i);
          if (XEXP (ref, 0) == old_label)
            {
              XEXP (ref, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
      return;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by the iterator because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
    JUMP_LABEL (x) = new_label;

  subrtx_ptr_iterator::array_type array;
  FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
    {
      rtx *loc = *iter;
      if (rtx x = *loc)
        {
          if (GET_CODE (x) == SYMBOL_REF
              && CONSTANT_POOL_ADDRESS_P (x))
            {
              rtx c = get_pool_constant (x);
              if (rtx_referenced_p (old_label, c))
                {
                  /* Create a copy of constant C; replace the label inside
                     but do not update LABEL_NUSES because uses in constant pool
                     are not counted.  */
                  rtx new_c = copy_rtx (c);
                  replace_label (&new_c, old_label, new_label, false);

                  /* Add the new constant NEW_C to constant pool and replace
                     the old reference to constant by new reference.  */
                  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
                  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
                }
            }

          if ((GET_CODE (x) == LABEL_REF
               || GET_CODE (x) == INSN_LIST)
              && XEXP (x, 0) == old_label)
            {
              XEXP (x, 0) = new_label;
              if (update_label_nuses)
                {
                  ++LABEL_NUSES (new_label);
                  --LABEL_NUSES (old_label);
                }
            }
        }
    }
}

void
replace_label_in_insn (rtx_insn *insn, rtx old_label, rtx new_label,
                       bool update_label_nuses)
{
  rtx insn_as_rtx = insn;
  replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
  gcc_checking_assert (insn_as_rtx == insn);
}
/* Return true if X is referenced in BODY.  */

bool
rtx_referenced_p (const_rtx x, const_rtx body)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, body, ALL)
    if (const_rtx y = *iter)
      {
        /* Check if a label_ref Y refers to label X.  */
        if (GET_CODE (y) == LABEL_REF
            && LABEL_P (x)
            && LABEL_REF_LABEL (y) == x)
          return true;

        if (rtx_equal_p (x, y))
          return true;

        /* If Y is a reference to pool constant traverse the constant.  */
        if (GET_CODE (y) == SYMBOL_REF
            && CONSTANT_POOL_ADDRESS_P (y))
          iter.substitute (get_pool_constant (y));
      }
  return false;
}
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const rtx_insn *insn, rtx *labelp, rtx_jump_table_data **tablep)
{
  rtx label;
  rtx_insn *table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = as_a <rtx_jump_table_data *> (table);
      return true;
    }
  return false;
}
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              {
                has_use_labelref = 1;
                break;
              }

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}
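
/* For example, (set (pc) (reg:SI 100)) in an insn with no JUMP_LABEL is a
   computed jump, whereas a tablejump's PARALLEL carries a
   (use (label_ref ...)) and is rejected by the test above.  */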
/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */

static int
for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
{
  int result, i, j;
  const char *format = GET_RTX_FORMAT (GET_CODE (exp));
  rtx *x;

  for (; format[n] != '\0'; n++)
    {
      switch (format[n])
        {
        case 'e':
          /* Call F on X.  */
          x = &XEXP (exp, n);
          result = (*f) (x, data);
          if (result == -1)
            /* Do not traverse sub-expressions.  */
            continue;
          else if (result != 0)
            /* Stop the traversal.  */
            return result;

          if (*x == NULL_RTX)
            /* There are no sub-expressions.  */
            continue;

          i = non_rtx_starting_operands[GET_CODE (*x)];
          if (i >= 0)
            {
              result = for_each_rtx_1 (*x, i, f, data);
              if (result != 0)
                return result;
            }
          break;

        case 'V':
        case 'E':
          if (XVEC (exp, n) == 0)
            continue;
          for (j = 0; j < XVECLEN (exp, n); ++j)
            {
              /* Call F on X.  */
              x = &XVECEXP (exp, n, j);
              result = (*f) (x, data);
              if (result == -1)
                /* Do not traverse sub-expressions.  */
                continue;
              else if (result != 0)
                /* Stop the traversal.  */
                return result;

              if (*x == NULL_RTX)
                /* There are no sub-expressions.  */
                continue;

              i = non_rtx_starting_operands[GET_CODE (*x)];
              if (i >= 0)
                {
                  result = for_each_rtx_1 (*x, i, f, data);
                  if (result != 0)
                    return result;
                }
            }
          break;

        default:
          /* Nothing to do.  */
          break;
        }
    }

  return 0;
}

/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
  int result;
  int i;

  /* Call F on X.  */
  result = (*f) (x, data);
  if (result == -1)
    /* Do not traverse sub-expressions.  */
    return 0;
  else if (result != 0)
    /* Stop the traversal.  */
    return result;

  if (*x == NULL_RTX)
    /* There are no sub-expressions.  */
    return 0;

  i = non_rtx_starting_operands[GET_CODE (*x)];
  if (i < 0)
    return 0;

  return for_each_rtx_1 (*x, i, f, data);
}

/* Like "for_each_rtx", but for calling on an rtx_insn **.  */

int
for_each_rtx_in_insn (rtx_insn **insn, rtx_function f, void *data)
{
  rtx insn_as_rtx = *insn;
  int result;

  result = for_each_rtx (&insn_as_rtx, f, data);

  if (insn_as_rtx != *insn)
    *insn = safe_as_a <rtx_insn *> (insn_as_rtx);

  return result;
}
/* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
   the equivalent add insn and pass the result to FN, using DATA as the
   final argument.  */

static int
for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
{
  rtx x = XEXP (mem, 0);
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_DEC:
    case POST_DEC:
      {
        int size = GET_MODE_SIZE (GET_MODE (mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-size, GET_MODE (r1));
        return fn (mem, x, r1, r1, c, data);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        rtx r1 = XEXP (x, 0);
        rtx add = XEXP (x, 1);
        return fn (mem, x, r1, add, NULL, data);
      }

    default:
      gcc_unreachable ();
    }
}

/* Traverse *LOC looking for MEMs that have autoinc addresses.
   For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and DATA.  FN is to return 0 to continue the
   traversal or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx x,
                  for_each_inc_dec_fn fn,
                  void *data)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
    {
      rtx mem = *iter;
      if (mem
          && MEM_P (mem)
          && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
        {
          int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
          if (res != 0)
            return res;
          iter.skip_subrtxes ();
        }
    }
  return 0;
}
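
/* For example, for (mem:SI (post_inc:SI (reg:SI R))) on a target where
   SImode is 4 bytes wide, FN receives the enclosing MEM, the POST_INC
   rtx itself, R as the modified rtx, and R and (const_int 4) as the two
   values whose sum R will hold afterwards.  */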
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_WIDE_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -6;
      if (code == CONST_WIDE_INT)
        return -6;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;

    default:
      return 0;
    }
}

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

int
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}
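
/* For example, a CONST_INT ranks below a plain REG here, so for
   (plus (const_int 4) (reg:SI R)) swap_commutative_operands_p returns
   nonzero and canonicalization yields (plus (reg:SI R) (const_int 4)).  */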
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */
int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
              enum machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}

/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}
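
/* For example, with 4-byte words, (subreg:SI (reg:DI R) 4) begins at bit
   32 on a little-endian target but at bit 0 on a big-endian one, and
   byte offset 0 gives the opposite pair.  */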
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.

   Rather than considering one particular inner register (and thus one
   particular "outer" register) in isolation, this function really uses
   XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
   function does not check whether adding INFO->offset to XREGNO gives
   a valid hard register; even if INFO->offset + XREGNO is out of range,
   there might be another register of the same type that is in range.
   Likewise it doesn't check whether HARD_REGNO_MODE_OK accepts the new
   register, since that can depend on things like whether the final
   register number is even or odd.  Callers that want to check whether
   this particular subreg can be replaced by a simple (reg ...) should
   use simplify_subreg_regno.  */

void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
                 unsigned int offset, enum machine_mode ymode,
                 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      enum machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
        xmode_unit = xmode;
      else
        xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (GET_MODE_NUNITS (xmode)
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
                  == (hard_regno_nregs[xregno][xmode_unit]
                      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
           < GET_MODE_NUNITS (xmode))
          && (offset / GET_MODE_SIZE (xmode_unit)
              != ((offset + GET_MODE_SIZE (ymode) - 1)
                  / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
          ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        info->offset = nregs_xmode - nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          return;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
                                       mode_for_size (GET_MODE_BITSIZE (xmode)
                                                      / nregs_xmode,
                                                      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
        = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
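
/* For example, on a 32-bit little-endian target where DImode occupies two
   SImode registers, (subreg:SI (reg:DI R) 4) is representable and this
   function reports nregs = 1 and offset = 1, i.e. the second of the two
   hard registers.  */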
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */
unsigned int
subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
                     unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}

/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */
bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
                               unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
                       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
         necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */
unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}

/* Return the number of registers that a subreg expression refers
   to.  */
unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}

/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */
unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */
static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}

/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */
rtx_insn *
find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
{
  struct parms_set_data parm;
  rtx p;
  rtx_insn *before, *first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller needs to either ensure that we will find all sets
         (in case code has not been optimized yet), or take care of
         possible labels by setting BOUNDARY to the preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const rtx_insn *insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const rtx_insn *i2
            = next_nonnote_insn (const_cast<rtx_insn *> (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);
  rtx_jump_table_data *table;

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &table))
    {
      rtvec vec = table->get_labels ();
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
         number of units (translated from digits) when using
         schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
         the mode for the factor.  */
      factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      /* Fall through.  */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}
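
/* For example, with 4-byte words a TImode multiplication has
   factor = 16 / 4 = 4, so its default cost is 4 * 4 * COSTS_N_INSNS (5),
   reflecting the quadratic cost of schoolbook multiplication on
   multiword values.  */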
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or size
   should be returned.  */

int
address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}

/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
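
/* For example, nonzero_bits of (and:SI (reg:SI R) (const_int 255)) in
   SImode is at most 0xff, and num_sign_bit_copies of
   (sign_extend:DI (reg:SI R)) in DImode is at least 33: the 32 bits
   added by the extension plus the original sign bit.  */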
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
                     enum machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (MODE), but if X is
   an arithmetic operation, we can do better.  */
static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
               enum machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  enum machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x))
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
          && mode_width < BITS_PER_WORD
          && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
             != 0)
        return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);
        int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = floor_log2 (nz0 & -nz0);
        int low1 = floor_log2 (nz1 & -nz1);
        unsigned HOST_WIDE_INT op0_maybe_minusp
          = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned HOST_WIDE_INT op1_maybe_minusp
          = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
          && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry
             about the way loads are extended.  */
          if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
               ? val_signbit_known_set_p (inner_mode, nonzero)
               : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
              || !MEM_P (SUBREG_REG (x)))
#endif
            {
              /* On many CISC machines, accessing an object in a wider mode
                 causes the high-order bits to become undefined.  So they are
                 not known to be zero.  */
              if (GET_MODE_PRECISION (GET_MODE (x))
                  > GET_MODE_PRECISION (inner_mode))
                nonzero |= (GET_MODE_MASK (GET_MODE (x))
                            & ~GET_MODE_MASK (inner_mode));
            }
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        {
          enum machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_PRECISION (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero
            = cached_nonzero_bits (XEXP (x, 0), mode,
                                   known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
                inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
                         << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
                            enum machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
4620 /* Return the number of bits at the high-order end of X that are known to
4621 be equal to the sign bit. X will be used in mode MODE; if MODE is
4622 VOIDmode, X will be used in its own mode. The returned value will always
4623 be between 1 and the number of bits in MODE. */
4626 num_sign_bit_copies1 (const_rtx x
, enum machine_mode mode
, const_rtx known_x
,
4627 enum machine_mode known_mode
,
4628 unsigned int known_ret
)
4630 enum rtx_code code
= GET_CODE (x
);
4631 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4632 int num0
, num1
, result
;
4633 unsigned HOST_WIDE_INT nonzero
;
4635 /* If we weren't given a mode, use the mode of X. If the mode is still
4636 VOIDmode, we don't know anything. Likewise if one of the modes is
4639 if (mode
== VOIDmode
)
4640 mode
= GET_MODE (x
);
4642 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4643 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4646 /* For a smaller object, just ignore the high bits. */
4647 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4649 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4650 known_x
, known_mode
, known_ret
);
4652 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4655 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4657 #ifndef WORD_REGISTER_OPERATIONS
4658 /* If this machine does not do all register operations on the entire
4659 register and MODE is wider than the mode of X, we can say nothing
4660 at all about the high-order bits. */
4663 /* Likewise on machines that do, if the mode of the object is smaller
4664 than a word and loads of that size don't sign extend, we can say
4665 nothing about the high order bits. */
4666 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4667 #ifdef LOAD_EXTEND_OP
4668 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4679 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4680 /* If pointers extend signed and this is a pointer in Pmode, say that
4681 all the bits above ptr_mode are known to be sign bit copies. */
4682 /* As we do not know which address space the pointer is referring to,
4683 we can do this only if the target does not support different pointer
4684 or address modes depending on the address space. */
4685 if (target_default_pointer_address_modes_p ()
4686 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4687 && mode
== Pmode
&& REG_POINTER (x
))
4688 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4692 unsigned int copies_for_hook
= 1, copies
= 1;
4693 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4694 known_mode
, known_ret
,
4698 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4699 known_mode
, known_ret
);
4701 if (copies
> 1 || copies_for_hook
> 1)
4702 return MAX (copies
, copies_for_hook
);
4704 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4709 #ifdef LOAD_EXTEND_OP
4710 /* Some RISC machines sign-extend all loads of smaller than a word. */
4711 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4712 return MAX (1, ((int) bitwidth
4713 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1));
4718 /* If the constant is negative, take its 1's complement and remask.
4719 Then see how many zero bits we have. */
4720 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4721 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4722 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4723 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4725 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4728 /* If this is a SUBREG for a promoted object that is sign-extended
4729 and we are looking at it in a wider mode, we know that at least the
4730 high-order bits are known to be sign bit copies. */
4732 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_SIGNED_P (x
))
4734 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4735 known_x
, known_mode
, known_ret
);
4736 return MAX ((int) bitwidth
4737 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1,
4741 /* For a smaller object, just ignore the high bits. */
4742 if (bitwidth
<= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
))))
4744 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), VOIDmode
,
4745 known_x
, known_mode
, known_ret
);
4746 return MAX (1, (num0
4747 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
)))
4751 #ifdef WORD_REGISTER_OPERATIONS
4752 #ifdef LOAD_EXTEND_OP
4753 /* For paradoxical SUBREGs on machines where all register operations
4754 affect the entire register, just look inside. Note that we are
4755 passing MODE to the recursive call, so the number of sign bit copies
4756 will remain relative to that mode, not the inner mode. */
4758 /* This works only if loads sign extend. Otherwise, if we get a
4759 reload for the inner part, it may be loaded from the stack, and
4760 then we lose all sign bit copies that existed before the store
4763 if (paradoxical_subreg_p (x
)
4764 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
4765 && MEM_P (SUBREG_REG (x
)))
4766 return cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4767 known_x
, known_mode
, known_ret
);
4773 if (CONST_INT_P (XEXP (x
, 1)))
4774 return MAX (1, (int) bitwidth
- INTVAL (XEXP (x
, 1)));
4778 return (bitwidth
- GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4779 + cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4780 known_x
, known_mode
, known_ret
));
4783 /* For a smaller object, just ignore the high bits. */
4784 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4785 known_x
, known_mode
, known_ret
);
4786 return MAX (1, (num0
- (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4790 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4791 known_x
, known_mode
, known_ret
);
4793 case ROTATE
: case ROTATERT
:
4794 /* If we are rotating left by a number of bits less than the number
4795 of sign bit copies, we can just subtract that amount from the
4797 if (CONST_INT_P (XEXP (x
, 1))
4798 && INTVAL (XEXP (x
, 1)) >= 0
4799 && INTVAL (XEXP (x
, 1)) < (int) bitwidth
)
4801 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4802 known_x
, known_mode
, known_ret
);
4803 return MAX (1, num0
- (code
== ROTATE
? INTVAL (XEXP (x
, 1))
4804 : (int) bitwidth
- INTVAL (XEXP (x
, 1))));
4809 /* In general, this subtracts one sign bit copy. But if the value
4810 is known to be positive, the number of sign bit copies is the
4811 same as that of the input. Finally, if the input has just one bit
4812 that might be nonzero, all the bits are copies of the sign bit. */
4813 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4814 known_x
, known_mode
, known_ret
);
4815 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4816 return num0
> 1 ? num0
- 1 : 1;
4818 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4823 && (((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
))
    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);
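      /* For illustration: in an 8-bit mode, (and x (const_int 0x0f)) has
	 num1 == 4, because 0x0f has four leading copies of its (zero)
	 sign bit; at least four high-order bits of the result are
	 therefore zero regardless of X.  */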
    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;
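      /* For illustration: if nonzero_bits shows XEXP (x, 0) fits in the
	 low four bits of an 8-bit mode (nonzero == 0x0f), then
	 (plus x (const_int -1)) has 8 - floor_log2 (0x0f) - 1 == 4 sign
	 bit copies: x - 1 stays within [-1, 14], whose top four bits are
	 all sign copies.  */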
    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);
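      /* For illustration: with num0 == 5 and num1 == 6 in an 8-bit mode,
	 result == 8 - (8 - 5) - (8 - 6) == 3; one further copy is
	 subtracted only when neither operand is known to be
	 nonnegative.  */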
    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;
    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU:  case GTU:  case LEU:  case LTU:
    case UNORDERED:  case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
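      /* For illustration: on a target where STORE_FLAG_VALUE == 1,
	 nonzero stays 1 and the result is
	 bitwidth - floor_log2 (1) - 1 == bitwidth - 1: every bit but the
	 lowest copies the (zero) sign bit.  */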
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}

/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}

/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  enum machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;
      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;
      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != ((unsigned HOST_WIDE_INT) 1
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }
  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}

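/* For illustration: given COND (le (reg:SI 100) (const_int 4)) and
   REVERSE == 0, rule (4) above yields (lt (reg:SI 100) (const_int 5)),
   assuming SImode values fit in HOST_BITS_PER_WIDE_INT.  */
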
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && LABEL_REF_LABEL (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}

/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	enum machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    enum machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}

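/* For illustration: on a target whose mode_rep_extended hook returns
   SIGN_EXTEND for (SImode, DImode), as MIPS64 does, the loop above records
   GET_MODE_PRECISION (DImode) - GET_MODE_PRECISION (SImode) == 32 in
   num_sign_bit_copies_in_rep[DImode][SImode].  */
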
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}

/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}

/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx, and rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}

/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}

/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}

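/* For illustration: low_bitmask_len (SImode, 0x3f) == 6, because
   exact_log2 (0x3f + 1) == 6, while low_bitmask_len (SImode, 0x36) == -1,
   since 0x36 + 1 is not a power of two.  */
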
/* Return the mode of MEM's address.  */

enum machine_mode
get_address_mode (rtx mem)
{
  enum machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}

/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}

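/* For illustration: on a little-endian target with 32-bit words and
   64-bit HOST_WIDE_INT, splitting (const_int 0x123456789) stores
   (const_int 0x23456789) in *FIRST and (const_int 0x1) in *SECOND;
   a big-endian target gets the two words in the opposite order.  */
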
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      enum machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}

/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside and is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	{
	  if (outer_code)
	    *outer_code = code;
	  return loc;
	}
    }
}

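/* For illustration: given the address (and:SI (plus:SI (reg:SI 100)
   (const_int 7)) (const_int -8)), which aligns REG+7 to 8 bytes, the
   loop above strips the AND and returns a pointer to the inner PLUS.  */
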
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}

/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG)
    return inner;
  return 0;
}

/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}

/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}

/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}

/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, enum machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}

/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}

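/* For illustration: decomposing the x86-style address
   (plus (plus (mult (reg:SI 100) (const_int 4)) (reg:SI 101))
	 (const_int 12))
   classifies (mult ... 4) as the index (get_index_term accepts the
   constant scale), (reg:SI 101) as the base, and (const_int 12) as the
   displacement.  */
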
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

6052 decompose_lea_address (struct address_info
*info
, rtx
*loc
)
6054 decompose_address (info
, loc
, VOIDmode
, ADDR_SPACE_GENERIC
, ADDRESS
);
6057 /* Describe the address of MEM X in INFO. */
6060 decompose_mem_address (struct address_info
*info
, rtx x
)
6062 gcc_assert (MEM_P (x
));
6063 decompose_address (info
, &XEXP (x
, 0), GET_MODE (x
),
6064 MEM_ADDR_SPACE (x
), MEM
);
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}

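/* For illustration: with *INFO->INDEX == (mult (reg) (const_int 4)) the
   scale is 4; with (ashift (reg) (const_int 2)) it is 1 << 2 == 4; a bare
   (reg) index gives 1.  */
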
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}

/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}