/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "addresses.h"
/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode,
                                                const_rtx, enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode, const_rtx,
                                          enum machine_mode, unsigned int);

/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];
/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep [MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}
/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (size == 0)
            size = GET_MODE_SIZE (mode);

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl))
                         ? tree_to_shwi (DECL_SIZE_UNIT (decl))
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
         nonsensical offsets.  */
      if (x == frame_pointer_rtx)
        {
          HOST_WIDE_INT adj_offset = offset - STARTING_FRAME_OFFSET;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (FRAME_GROWS_DOWNWARD)
            {
              if (adj_offset < frame_offset || adj_offset + size - 1 >= 0)
                return 1;
            }
          else
            {
              if (adj_offset < 0 || adj_offset + size - 1 >= frame_offset)
                return 1;
            }
          return 0;
        }
      /* ??? Need to add a similar guard for nonsensical offsets.  */
      if (x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}
/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
          && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, might be zero.  */
  return false;
}
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}
/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}
/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
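/* Illustrative sketch (not from the original file): for the rtx
   (const (plus (symbol_ref "a") (const_int 8))), get_integer_term
   returns 8 and get_related_value returns (symbol_ref "a").  */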
/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}
/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}
/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_UNSIGNED_P (op))
    return true;

  return false;
}
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
    case REG:
      /* Compare registers by number.  */
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    CASE_CONST_ANY:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  const_rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}
/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}
/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
        IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}
/* A for_each_rtx subroutine of record_hard_reg_uses.  */
static int
record_hard_reg_uses_1 (rtx *px, void *data)
{
  rtx x = *px;
  HARD_REG_SET *pused = (HARD_REG_SET *) data;

  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
      while (nregs-- > 0)
        SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
    }
  return 0;
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  for_each_rtx (px, record_hard_reg_uses_1, data);
}
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SET whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In common case
                 only single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach set first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (!set_verified)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 1;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return NULL_RTX;
              break;

            default:
              return NULL_RTX;
            }
        }
    }
  return set;
}
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a NOOP if destination overlaps with selected src vector
     elements.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      int c0 = INTVAL (XVECEXP (par, 0, 0));
      HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
        if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
          return 0;
      return
        simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
                               offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  /* Check the code to be executed for COND_EXEC.  */
  if (GET_CODE (pat) == COND_EXEC)
    pat = COND_EXEC_CODE (pat);

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}
/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

   If the item being stored in or clobbered is a SUBREG of a hard register,
   the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }
  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}
/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  /* If a COND_EXEC is not executed, the value survives.  */
  if (GET_CODE (pattern) == COND_EXEC)
    return 0;

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}
/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}
/* Return true if KIND is an integer REG_NOTE.  */

static bool
int_reg_note_p (enum reg_note kind)
{
  return kind == REG_BR_PROB;
}
/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  gcc_checking_assert (!int_reg_note_p (kind));
  switch (kind)
    {
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
    case REG_TM:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}
/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}
/* Add an integer register note with kind KIND and datum DATUM to INSN.  */

void
add_int_reg_note (rtx insn, enum reg_note kind, int datum)
{
  gcc_checking_assert (int_reg_note_p (kind));
  REG_NOTES (insn) = gen_rtx_INT_LIST ((enum machine_mode) kind,
                                       datum, REG_NOTES (insn));
}
/* Add a register note like NOTE to INSN.  */

void
add_shallow_copy_of_reg_note (rtx insn, rtx note)
{
  if (GET_CODE (note) == INT_LIST)
    add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
  else
    add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
}
/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}
/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}
/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = XEXP (temp, 1);
          else
            *listp = XEXP (temp, 1);

          return;
        }

      prev = temp;
      temp = XEXP (temp, 1);
    }
}
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable changes to machine state, and thus no
   instructions or register uses should be moved or combined across them.
   This includes only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case UNSPEC_VOLATILE:
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;
      break;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;
      break;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);

  switch (code)
    {
    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;
      break;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

static int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);

  switch (code)
    {
      /* Handle these cases quickly.  */
    case REG:
    case PC:
    case CC0:
      return 0;

    case UNSPEC:
      return targetm.unspec_may_trap_p (x, flags);

    case UNSPEC_VOLATILE:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
          || HONOR_NANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
          || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}
/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}
/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      if ((reg:SI) == 1)
        (set (reg:SI) (mem:SI (%fp - 7)))
      else
        (set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}
/* Replace occurrences of the old label in *X with the new one.
   DATA is a REPLACE_LABEL_DATA containing the old and new labels.  */

int
replace_label (rtx *x, void *data)
{
  rtx l = *x;
  rtx old_label = ((replace_label_data *) data)->r1;
  rtx new_label = ((replace_label_data *) data)->r2;
  bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (l))
    {
      rtx c = get_pool_constant (l);
      if (rtx_referenced_p (old_label, c))
        {
          rtx new_c, new_l;
          replace_label_data *d = (replace_label_data *) data;

          /* Create a copy of constant C; replace the label inside
             but do not update LABEL_NUSES because uses in constant pool
             are not counted.  */
          new_c = copy_rtx (c);
          d->update_label_nuses = false;
          for_each_rtx (&new_c, replace_label, data);
          d->update_label_nuses = update_label_nuses;

          /* Add the new constant NEW_C to constant pool and replace
             the old reference to constant by new reference.  */
          new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
          *x = replace_rtx (l, l, new_l);
        }
      return 0;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by for_each_rtx because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
    JUMP_LABEL (l) = new_label;

  if ((GET_CODE (l) == LABEL_REF
       || GET_CODE (l) == INSN_LIST)
      && XEXP (l, 0) == old_label)
    {
      XEXP (l, 0) = new_label;
      if (update_label_nuses)
        {
          ++LABEL_NUSES (new_label);
          --LABEL_NUSES (old_label);
        }
    }

  return 0;
}
2745 return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero
2746 too, otherwise FOR_EACH_RTX continues traversing *BODY. */
2749 rtx_referenced_p_1 (rtx
*body
, void *x
)
2753 if (*body
== NULL_RTX
)
2754 return y
== NULL_RTX
;
2756 /* Return true if a label_ref *BODY refers to label Y. */
2757 if (GET_CODE (*body
) == LABEL_REF
&& LABEL_P (y
))
2758 return XEXP (*body
, 0) == y
;
2760 /* If *BODY is a reference to pool constant traverse the constant. */
2761 if (GET_CODE (*body
) == SYMBOL_REF
2762 && CONSTANT_POOL_ADDRESS_P (*body
))
2763 return rtx_referenced_p (y
, get_pool_constant (*body
));
2765 /* By default, compare the RTL expressions. */
2766 return rtx_equal_p (*body
, y
);
/* Return true if X is referenced in BODY.  */

int
rtx_referenced_p (rtx x, rtx body)
{
  return for_each_rtx (&body, rtx_referenced_p_1, x);
}
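
/* Illustrative sketch (hypothetical helper): before deleting a label,
   check whether some insn's pattern still mentions it.  */

static bool
example_label_still_used_p (rtx label, rtx insn)
{
  return rtx_referenced_p (label, PATTERN (insn)) != 0;
}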
/* If INSN is a tablejump, return true and store the label (which precedes
   the jump table) in *LABELP and the jump table itself in *TABLEP.
   LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
{
  rtx label, table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = NEXT_INSN (label)) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = table;
      return true;
    }
  return false;
}
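
/* Illustrative sketch (hypothetical helper): count the entries of the
   dispatch table behind a tablejump.  An ADDR_VEC keeps its labels in
   operand 0, an ADDR_DIFF_VEC in operand 1.  */

static int
example_jump_table_size (const_rtx insn)
{
  rtx table;

  if (!tablejump_p (insn, NULL, &table))
    return 0;
  return XVECLEN (PATTERN (table),
                  GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC);
}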
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              {
                has_use_labelref = 1;
                break;
              }

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}
/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */

static int
for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
{
  int result, i, j;
  const char *format = GET_RTX_FORMAT (GET_CODE (exp));
  rtx *x;

  for (; format[n] != '\0'; n++)
    {
      switch (format[n])
        {
        case 'e':
          /* Call F on X.  */
          x = &XEXP (exp, n);
          result = (*f) (x, data);
          if (result == -1)
            /* Do not traverse sub-expressions.  */
            continue;
          else if (result != 0)
            /* Stop the traversal.  */
            return result;

          if (*x == NULL_RTX)
            /* There are no sub-expressions.  */
            continue;

          i = non_rtx_starting_operands[GET_CODE (*x)];
          if (i >= 0)
            {
              result = for_each_rtx_1 (*x, i, f, data);
              if (result != 0)
                return result;
            }
          break;

        case 'V':
        case 'E':
          if (XVEC (exp, n) == 0)
            continue;
          for (j = 0; j < XVECLEN (exp, n); ++j)
            {
              /* Call F on X.  */
              x = &XVECEXP (exp, n, j);
              result = (*f) (x, data);
              if (result == -1)
                /* Do not traverse sub-expressions.  */
                continue;
              else if (result != 0)
                /* Stop the traversal.  */
                return result;

              if (*x == NULL_RTX)
                /* There are no sub-expressions.  */
                continue;

              i = non_rtx_starting_operands[GET_CODE (*x)];
              if (i >= 0)
                {
                  result = for_each_rtx_1 (*x, i, f, data);
                  if (result != 0)
                    return result;
                }
            }
          break;

        default:
          /* Nothing to do.  */
          break;
        }
    }

  return 0;
}
/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
  int result;
  int i;

  /* Call F on X.  */
  result = (*f) (x, data);
  if (result == -1)
    /* Do not traverse sub-expressions.  */
    return 0;
  else if (result != 0)
    /* Stop the traversal.  */
    return result;

  if (*x == NULL_RTX)
    /* There are no sub-expressions.  */
    return 0;

  i = non_rtx_starting_operands[GET_CODE (*x)];
  if (i < 0)
    return 0;

  return for_each_rtx_1 (*x, i, f, data);
}
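
/* Illustrative sketch (hypothetical callback): count the MEMs in a
   pattern with for_each_rtx.  Returning 0 from the callback lets the
   walk continue over the whole expression tree.  */

static int
example_count_mems_1 (rtx *x, void *data)
{
  if (*x != NULL_RTX && MEM_P (*x))
    ++*(int *) data;
  return 0;
}

static int
example_count_mems (rtx pat)
{
  int count = 0;
  for_each_rtx (&pat, example_count_mems_1, &count);
  return count;
}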
/* Data structure that holds the internal state communicated between
   for_each_inc_dec, for_each_inc_dec_find_mem and
   for_each_inc_dec_find_inc_dec.  */

struct for_each_inc_dec_ops
{
  /* The function to be called for each autoinc operation found.  */
  for_each_inc_dec_fn fn;
  /* The opaque argument to be passed to it.  */
  void *arg;
  /* The MEM we're visiting, if any.  */
  rtx mem;
};

static int for_each_inc_dec_find_mem (rtx *r, void *d);
/* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
   operands of the equivalent add insn and pass the result to the
   operator specified by *D.  */

static int
for_each_inc_dec_find_inc_dec (rtx *r, void *d)
{
  rtx x = *r;
  struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;

  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
        int size = GET_MODE_SIZE (GET_MODE (data->mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (size, GET_MODE (r1));
        return data->fn (data->mem, x, r1, r1, c, data->arg);
      }

    case PRE_DEC:
    case POST_DEC:
      {
        int size = GET_MODE_SIZE (GET_MODE (data->mem));
        rtx r1 = XEXP (x, 0);
        rtx c = gen_int_mode (-size, GET_MODE (r1));
        return data->fn (data->mem, x, r1, r1, c, data->arg);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
        rtx r1 = XEXP (x, 0);
        rtx add = XEXP (x, 1);
        return data->fn (data->mem, x, r1, add, NULL, data->arg);
      }

    case MEM:
      {
        rtx save = data->mem;
        int ret = for_each_inc_dec_find_mem (r, d);
        data->mem = save;
        return ret;
      }

    default:
      return 0;
    }
}
/* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
   address, extract the operands of the equivalent add insn and pass
   the result to the operator specified by *D.  */

static int
for_each_inc_dec_find_mem (rtx *r, void *d)
{
  rtx x = *r;
  if (x != NULL_RTX && MEM_P (x))
    {
      struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
      int result;

      data->mem = x;

      result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
                             data);
      if (result)
        return result;

      return -1;
    }
  return 0;
}
/* Traverse *X looking for MEMs, and for autoinc operations within
   them.  For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and ARG.  FN is to return -1 to skip looking for other
   autoinc operations within the visited operation, 0 to continue the
   traversal, or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx *x,
                  for_each_inc_dec_fn fn,
                  void *arg)
{
  struct for_each_inc_dec_ops data;

  data.fn = fn;
  data.arg = arg;
  data.mem = NULL;

  return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
}
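
/* Illustrative sketch (hypothetical callback): detect whether an insn
   contains any autoincrement address.  Returning a value other than 0
   or -1 from the callback stops the walk, and for_each_inc_dec hands
   that value back to the caller.  */

static int
example_note_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                      rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                      rtx srcoff ATTRIBUTE_UNUSED, void *arg ATTRIBUTE_UNUSED)
{
  return 1;
}

static bool
example_has_autoinc_p (rtx insn)
{
  rtx pat = PATTERN (insn);
  return for_each_inc_dec (&pat, example_note_autoinc, NULL) != 0;
}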
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always become the second operand.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_WIDE_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
        return -6;
      if (code == CONST_WIDE_INT)
        return -6;
      if (code == CONST_DOUBLE)
        return -5;
      if (code == CONST_FIXED)
        return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
        return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
         of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
          || (MEM_P (op) && MEM_POINTER (op)))
        return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
        return 1;

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
}
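
/* Illustrative sketch (hypothetical helper): canonicalize the operand
   order of a commutative operation before constructing it, mirroring
   what the simplifiers do internally.  */

static void
example_canonicalize_comm_operands (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    {
      rtx tem = *op0;
      *op0 = *op1;
      *op1 = tem;
    }
}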
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
        return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc == &XVECEXP (in, i, j)
              || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
            return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
              enum machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}
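
/* Illustrative sketch (hypothetical self-check, not original code): on a
   little-endian 64-bit target, (subreg:SI (reg:DI r) 4) selects the high
   word, so subreg_lsb returns 32, while SUBREG_BYTE 0 yields 0.  */

static void
example_check_subreg_lsb (const_rtx x)
{
  gcc_assert (GET_CODE (x) == SUBREG);
  gcc_assert (subreg_lsb (x)
              == subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x)));
}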
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.  */

void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
                 unsigned int offset, enum machine_mode ymode,
                 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      enum machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
        xmode_unit = xmode;
      else
        xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
                  == (GET_MODE_NUNITS (xmode)
                      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
                  == (hard_regno_nregs[xregno][xmode_unit]
                      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
         if you don't cross the holes.  (Such a SUBREG should be done by
         picking a different register class, or doing it in memory if
         necessary.)  An example of a value with holes is XCmode on 32-bit
         x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
         3 for each part, but in memory it's two 128-bit parts.
         Padding is assumed to be at the end (not necessarily the 'high part')
         of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
           < GET_MODE_NUNITS (xmode))
          && (offset / GET_MODE_SIZE (xmode_unit)
              != ((offset + GET_MODE_SIZE (ymode) - 1)
                  / GET_MODE_SIZE (xmode_unit))))
        {
          info->representable_p = false;
          rknown = true;
        }
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
         actual hard registers than the original register, we must
         return a negative offset so that we find the proper highpart
         of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
          ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
        info->offset = nregs_xmode - nregs_ymode;
      else
        info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          rknown = true;
        }
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
        {
          info->representable_p = false;
          info->nregs
            = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
          info->offset = offset / regsize_xmode;
          rknown = true;
        }
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
        {
          info->offset = 0;
          info->nregs = nregs_ymode;
          return;
        }
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
                                       mode_for_size (GET_MODE_BITSIZE (xmode)
                                                      / nregs_xmode,
                                                      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
        = (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
                     unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
                               unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
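
/* Illustrative sketch (hypothetical helper): translate a hard-register
   subreg into the hard register it occupies, rejecting offsets that
   subreg_get_info reports as unrepresentable.  */

static int
example_subreg_hard_regno (unsigned int regno, enum machine_mode xmode,
                           unsigned int offset, enum machine_mode ymode)
{
  if (!subreg_offset_representable_p (regno, xmode, offset, ymode))
    return -1;
  return regno + subreg_regno_offset (regno, xmode, offset, ymode);
}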
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
                       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode)
      /* We can use mode change in LRA for some transformations.  */
      && ! lra_in_progress)
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM
      /* We should convert hard stack register in LRA if it is
         necessary.  */
      && ! lra_in_progress)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
                                     GET_MODE (subreg),
                                     SUBREG_BYTE (x),
                                     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
                   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */

static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */

rtx
find_first_parameter_load (rtx call_insn, rtx boundary)
{
  struct parms_set_data parm;
  rtx p, before, first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
        && REG_P (XEXP (XEXP (p, 0), 0)))
      {
        gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

        /* We only care about registers which can hold function
           arguments.  */
        if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
          continue;

        SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
        parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
         another.  Stop in that case.  */
      if (CALL_P (before))
        break;

      /* Our caller must either ensure that we will find all sets
         (in case the code has not been optimized yet), or take care
         of possible labels by setting BOUNDARY to the preceding
         CODE_LABEL.  */
      if (LABEL_P (before))
        {
          gcc_assert (before == boundary);
          break;
        }

      if (INSN_P (before))
        {
          int nregs_old = parm.nregs;
          note_stores (PATTERN (before), parms_set, &parm);
          /* If we found something that did not set a parameter reg,
             we're done.  Do not keep going, as that might result
             in hoisting an insn before the setting of a pseudo
             that is used by the hoisted insn.  */
          if (nregs_old != parm.nregs)
            first_set = before;
          else
            break;
        }
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const_rtx insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
          && fixed_regs[REGNO (SET_DEST (set))]
          && general_operand (SET_SRC (set), VOIDmode))
        return true;
      if (REG_P (SET_SRC (set))
          && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
          && REG_P (SET_DEST (set))
          && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
        return true;
      /* There may be a stack pop just after the call and before the store
         of the return register.  Search for the actual store when deciding
         if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
        {
          /* This CONST_CAST is okay because next_nonnote_insn just
             returns its argument and we assign it to a const_rtx
             variable.  */
          const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX (insn));
          if (i2 && keep_with_call_p (i2))
            return true;
        }
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &tmp))
    {
      rtvec vec = XVEC (PATTERN (tmp),
                        GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;
  int factor;

  if (x == 0)
    return 0;

  /* A size N times larger than UNITS_PER_WORD likely needs N times as
     many insns, taking N times as long.  */
  factor = GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD;
  if (factor == 0)
    factor = 1;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      /* Multiplication has time-complexity O(N*N), where N is the
         number of units (translated from digits) when using
         schoolbook long multiplication.  */
      total = factor * factor * COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      /* Similarly, complexity for schoolbook long division.  */
      total = factor * factor * COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    case SET:
      /* A SET doesn't have a mode, so let's look at the SET_DEST to get
         the mode for the factor.  */
      factor = GET_MODE_SIZE (GET_MODE (SET_DEST (x))) / UNITS_PER_WORD;
      if (factor == 0)
        factor = 1;
      /* Fall through.  */
    default:
      total = factor * COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
         the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
        return COSTS_N_INSNS (2 + factor);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
        return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
                   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}
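
/* Illustrative sketch (hypothetical helper): pick the cheaper of two
   equivalent expressions under the current optimization goal, the usual
   pattern in expanders choosing between RTL forms.  set_src_cost is the
   SET_SRC-flavored wrapper around rtx_cost.  */

static bool
example_cheaper_src_p (rtx a, rtx b, bool speed)
{
  return set_src_cost (a, speed) < set_src_cost (b, speed);
}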
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed
   or size should be returned.  */

int
address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, mode, as, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, enum machine_mode, addr_space_t, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
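
/* Illustrative sketch (hypothetical helper): use nonzero_bits to prove
   that a word-mode value is a multiple of a power-of-two ALIGN, e.g.
   when reasoning about pointer alignment.  */

static bool
example_known_aligned_p (rtx x, unsigned HOST_WIDE_INT align)
{
  /* ALIGN is assumed to be a power of two; the value is aligned when
     no bit below ALIGN can be nonzero.  */
  return (nonzero_bits (x, word_mode) & (align - 1)) == 0;
}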
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
                     enum machine_mode known_mode,
                     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return nonzero_bits1 (x, mode, x1, mode,
                              cached_nonzero_bits (x1, mode, known_x,
                                                   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return nonzero_bits1 (x, mode, x0, mode,
                              cached_nonzero_bits (x0, mode, known_x,
                                                   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
               enum machine_mode known_mode,
               unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  enum machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
                                      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
         all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
          && REG_POINTER (x))
        nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
         pointer-to-integer casts, so we can't trust it except for
         things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
           || x == frame_pointer_rtx
           || x == arg_pointer_rtx)
          && REGNO_POINTER_ALIGN (REGNO (x)))
        {
          unsigned HOST_WIDE_INT alignment
            = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
          /* If PUSH_ROUNDING is defined, it is possible for the
             stack to be momentarily aligned only to that amount,
             so we pick the least alignment.  */
          if (x == stack_pointer_rtx && PUSH_ARGS)
            alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
                             alignment);
#endif

          nonzero &= ~(alignment - 1);
        }

      {
        unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
        rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
                                                  known_mode, known_ret,
                                                  &nonzero_for_hook);

        if (new_rtx)
          nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
                                                   known_mode, known_ret);

        return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
          && mode_width < BITS_PER_WORD
          && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
             != 0)
        return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
         zeros the rest of the register.  Noticing that fact saves a lot
         of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
        nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
         Code here used to clear bits outside the mode of X, but that is
         now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
         operation in, and not the actual operation mode.  We can wind
         up with (subreg:DI (gt:V4HI x y)), and we don't have anything
         that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
          && mode_width <= HOST_BITS_PER_WIDE_INT)
        nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
        nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
         and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
          == GET_MODE_PRECISION (GET_MODE (x)))
        nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
                                       known_x, known_mode, known_ret)
                  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
         Otherwise, show all the bits in the outer mode but not the inner
         may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
        {
          inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
          if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
            inner_nz |= (GET_MODE_MASK (mode)
                         & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
        }

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
                                      known_x, known_mode, known_ret)
                 & cached_nonzero_bits (XEXP (x, 1), mode,
                                        known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
        unsigned HOST_WIDE_INT nonzero0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero0) != nonzero)
          nonzero &= nonzero0
                     | cached_nonzero_bits (XEXP (x, 1), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
         high- and low-order zero bits of these operations.  We start by
         computing the width (position of the highest-order nonzero bit)
         and the number of low-order zero bits for each value.  */
      {
        unsigned HOST_WIDE_INT nz0
          = cached_nonzero_bits (XEXP (x, 0), mode,
                                 known_x, known_mode, known_ret);
        unsigned HOST_WIDE_INT nz1
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);
        int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
        int width0 = floor_log2 (nz0) + 1;
        int width1 = floor_log2 (nz1) + 1;
        int low0 = floor_log2 (nz0 & -nz0);
        int low1 = floor_log2 (nz1 & -nz1);
        unsigned HOST_WIDE_INT op0_maybe_minusp
          = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned HOST_WIDE_INT op1_maybe_minusp
          = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
        unsigned int result_width = mode_width;
        int result_low = 0;

        switch (code)
          {
          case PLUS:
            result_width = MAX (width0, width1) + 1;
            result_low = MIN (low0, low1);
            break;
          case MINUS:
            result_low = MIN (low0, low1);
            break;
          case MULT:
            result_width = width0 + width1;
            result_low = low0 + low1;
            break;
          case DIV:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = width0;
            break;
          case UDIV:
            if (width1 == 0)
              break;
            result_width = width0;
            break;
          case MOD:
            if (width1 == 0)
              break;
            if (!op0_maybe_minusp && !op1_maybe_minusp)
              result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          case UMOD:
            if (width1 == 0)
              break;
            result_width = MIN (width0, width1);
            result_low = MIN (low0, low1);
            break;
          default:
            gcc_unreachable ();
          }

        if (result_width < mode_width)
          nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

        if (result_low > 0)
          nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
        nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
         been zero-extended, we know that at least the high-order bits
         are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
        nonzero = GET_MODE_MASK (GET_MODE (x))
                  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
                                         known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
         machines, we can compute this from which bits of the inner
         object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
          && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
        {
          nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
                                          known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
          /* If this is a typical RISC machine, we only have to worry
             about the way loads are extended.  */
          if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
               ? val_signbit_known_set_p (inner_mode, nonzero)
               : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
              || !MEM_P (SUBREG_REG (x)))
#endif
            {
              /* On many CISC machines, accessing an object in a wider mode
                 causes the high-order bits to become undefined.  So they are
                 not known to be zero.  */
              if (GET_MODE_PRECISION (GET_MODE (x))
                  > GET_MODE_PRECISION (inner_mode))
                nonzero |= (GET_MODE_MASK (GET_MODE (x))
                            & ~GET_MODE_MASK (inner_mode));
            }
        }
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
         that aren't in GET_MODE (x) are always significant.  The rest of the
         nonzero bits are those that are significant in the operand of
         the shift when shifted the appropriate number of bits.  This
         shows that high-order bits are cleared by the right shift and
         low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) >= 0
          && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
          && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
        {
          enum machine_mode inner_mode = GET_MODE (x);
          unsigned int width = GET_MODE_PRECISION (inner_mode);
          int count = INTVAL (XEXP (x, 1));
          unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
          unsigned HOST_WIDE_INT op_nonzero
            = cached_nonzero_bits (XEXP (x, 0), mode,
                                   known_x, known_mode, known_ret);
          unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
          unsigned HOST_WIDE_INT outer = 0;

          if (mode_width > width)
            outer = (op_nonzero & nonzero & ~mode_mask);

          if (code == LSHIFTRT)
            inner >>= count;
          else if (code == ASHIFTRT)
            {
              inner >>= count;

              /* If the sign bit may have been nonzero before the shift, we
                 need to mark all the places it could have been copied to
                 by the shift as possibly nonzero.  */
              if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
                inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
                         << (width - count);
            }
          else if (code == ASHIFT)
            inner <<= count;
          else
            inner = ((inner << (count % width)
                      | (inner >> (width - (count % width)))) & mode_mask);

          nonzero &= (outer | inner);
        }
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
         that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
        nonzero
          |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
        nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
        unsigned HOST_WIDE_INT nonzero_true
          = cached_nonzero_bits (XEXP (x, 1), mode,
                                 known_x, known_mode, known_ret);

        /* Don't call nonzero_bits for the second time if it cannot change
           anything.  */
        if ((nonzero & nonzero_true) != nonzero)
          nonzero &= nonzero_true
                     | cached_nonzero_bits (XEXP (x, 2), mode,
                                            known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies


/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
                            enum machine_mode known_mode,
                            unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return
          num_sign_bit_copies1 (x, mode, x1, mode,
                                cached_num_sign_bit_copies (x1, mode, known_x,
                                                            known_mode,
                                                            known_ret));

      if (ARITHMETIC_P (x1)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return
          num_sign_bit_copies1 (x, mode, x0, mode,
                                cached_num_sign_bit_copies (x0, mode, known_x,
                                                            known_mode,
                                                            known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
4510 /* Return the number of bits at the high-order end of X that are known to
4511 be equal to the sign bit. X will be used in mode MODE; if MODE is
4512 VOIDmode, X will be used in its own mode. The returned value will always
4513 be between 1 and the number of bits in MODE. */
4516 num_sign_bit_copies1 (const_rtx x
, enum machine_mode mode
, const_rtx known_x
,
4517 enum machine_mode known_mode
,
4518 unsigned int known_ret
)
4520 enum rtx_code code
= GET_CODE (x
);
4521 unsigned int bitwidth
= GET_MODE_PRECISION (mode
);
4522 int num0
, num1
, result
;
4523 unsigned HOST_WIDE_INT nonzero
;
4525 /* If we weren't given a mode, use the mode of X. If the mode is still
4526 VOIDmode, we don't know anything. Likewise if one of the modes is
4529 if (mode
== VOIDmode
)
4530 mode
= GET_MODE (x
);
4532 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
))
4533 || VECTOR_MODE_P (GET_MODE (x
)) || VECTOR_MODE_P (mode
))
4536 /* For a smaller object, just ignore the high bits. */
4537 if (bitwidth
< GET_MODE_PRECISION (GET_MODE (x
)))
4539 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
4540 known_x
, known_mode
, known_ret
);
4542 num0
- (int) (GET_MODE_PRECISION (GET_MODE (x
)) - bitwidth
));
4545 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_PRECISION (GET_MODE (x
)))
4547 #ifndef WORD_REGISTER_OPERATIONS
4548 /* If this machine does not do all register operations on the entire
4549 register and MODE is wider than the mode of X, we can say nothing
4550 at all about the high-order bits. */
4553 /* Likewise on machines that do, if the mode of the object is smaller
4554 than a word and loads of that size don't sign extend, we can say
4555 nothing about the high order bits. */
4556 if (GET_MODE_PRECISION (GET_MODE (x
)) < BITS_PER_WORD
4557 #ifdef LOAD_EXTEND_OP
4558 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
4569 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
4570 /* If pointers extend signed and this is a pointer in Pmode, say that
4571 all the bits above ptr_mode are known to be sign bit copies. */
4572 /* As we do not know which address space the pointer is referring to,
4573 we can do this only if the target does not support different pointer
4574 or address modes depending on the address space. */
4575 if (target_default_pointer_address_modes_p ()
4576 && ! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4577 && mode
== Pmode
&& REG_POINTER (x
))
4578 return GET_MODE_PRECISION (Pmode
) - GET_MODE_PRECISION (ptr_mode
) + 1;
4582 unsigned int copies_for_hook
= 1, copies
= 1;
4583 rtx new_rtx
= rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
4584 known_mode
, known_ret
,
4588 copies
= cached_num_sign_bit_copies (new_rtx
, mode
, known_x
,
4589 known_mode
, known_ret
);
4591 if (copies
> 1 || copies_for_hook
> 1)
4592 return MAX (copies
, copies_for_hook
);
4594 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4599 #ifdef LOAD_EXTEND_OP
4600 /* Some RISC machines sign-extend all loads of smaller than a word. */
4601 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4602 return MAX (1, ((int) bitwidth
4603 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1));
4608 /* If the constant is negative, take its 1's complement and remask.
4609 Then see how many zero bits we have. */
4610 nonzero
= UINTVAL (x
) & GET_MODE_MASK (mode
);
4611 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4612 && (nonzero
& ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4613 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4615 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4618 /* If this is a SUBREG for a promoted object that is sign-extended
4619 and we are looking at it in a wider mode, we know that at least the
4620 high-order bits are known to be sign bit copies. */
4622 if (SUBREG_PROMOTED_VAR_P (x
) && ! SUBREG_PROMOTED_UNSIGNED_P (x
))
4624 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4625 known_x
, known_mode
, known_ret
);
4626 return MAX ((int) bitwidth
4627 - (int) GET_MODE_PRECISION (GET_MODE (x
)) + 1,
4631 /* For a smaller object, just ignore the high bits. */
4632 if (bitwidth
<= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
))))
4634 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), VOIDmode
,
4635 known_x
, known_mode
, known_ret
);
4636 return MAX (1, (num0
4637 - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x
)))
4641 #ifdef WORD_REGISTER_OPERATIONS
4642 #ifdef LOAD_EXTEND_OP
4643 /* For paradoxical SUBREGs on machines where all register operations
4644 affect the entire register, just look inside. Note that we are
4645 passing MODE to the recursive call, so the number of sign bit copies
4646 will remain relative to that mode, not the inner mode. */
4648 /* This works only if loads sign extend. Otherwise, if we get a
4649 reload for the inner part, it may be loaded from the stack, and
4650 then we lose all sign bit copies that existed before the store
4653 if (paradoxical_subreg_p (x
)
4654 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
4655 && MEM_P (SUBREG_REG (x
)))
4656 return cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4657 known_x
, known_mode
, known_ret
);
4663 if (CONST_INT_P (XEXP (x
, 1)))
4664 return MAX (1, (int) bitwidth
- INTVAL (XEXP (x
, 1)));
4668 return (bitwidth
- GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4669 + cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4670 known_x
, known_mode
, known_ret
));
4673 /* For a smaller object, just ignore the high bits. */
4674 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4675 known_x
, known_mode
, known_ret
);
4676 return MAX (1, (num0
- (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x
, 0)))
4680 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4681 known_x
, known_mode
, known_ret
);
4683 case ROTATE
: case ROTATERT
:
4684 /* If we are rotating left by a number of bits less than the number
4685 of sign bit copies, we can just subtract that amount from the
4687 if (CONST_INT_P (XEXP (x
, 1))
4688 && INTVAL (XEXP (x
, 1)) >= 0
4689 && INTVAL (XEXP (x
, 1)) < (int) bitwidth
)
4691 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4692 known_x
, known_mode
, known_ret
);
4693 return MAX (1, num0
- (code
== ROTATE
? INTVAL (XEXP (x
, 1))
4694 : (int) bitwidth
- INTVAL (XEXP (x
, 1))));
4699 /* In general, this subtracts one sign bit copy. But if the value
4700 is known to be positive, the number of sign bit copies is the
4701 same as that of the input. Finally, if the input has just one bit
4702 that might be nonzero, all the bits are copies of the sign bit. */
4703 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4704 known_x
, known_mode
, known_ret
);
4705 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4706 return num0
> 1 ? num0
- 1 : 1;
4708 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4713 && (((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
))
4718 case IOR
: case AND
: case XOR
:
4719 case SMIN
: case SMAX
: case UMIN
: case UMAX
:
4720 /* Logical operations will preserve the number of sign-bit copies.
4721 MIN and MAX operations always return one of the operands. */
4722 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4723 known_x
, known_mode
, known_ret
);
4724 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4725 known_x
, known_mode
, known_ret
);
4727 /* If num1 is clearing some of the top bits then regardless of
4728 the other term, we are guaranteed to have at least that many
4729 high-order zero bits. */
4732 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4733 && CONST_INT_P (XEXP (x
, 1))
4734 && (UINTVAL (XEXP (x
, 1))
4735 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) == 0)
4738 /* Similarly for IOR when setting high-order bits. */
4741 && bitwidth
<= HOST_BITS_PER_WIDE_INT
4742 && CONST_INT_P (XEXP (x
, 1))
4743 && (UINTVAL (XEXP (x
, 1))
4744 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4747 return MIN (num0
, num1
);
4749 case PLUS
: case MINUS
:
4750 /* For addition and subtraction, we can have a 1-bit carry. However,
4751 if we are subtracting 1 from a positive number, there will not
4752 be such a carry. Furthermore, if the positive number is known to
4753 be 0 or 1, we know the result is either -1 or 0. */
4755 if (code
== PLUS
&& XEXP (x
, 1) == constm1_rtx
4756 && bitwidth
<= HOST_BITS_PER_WIDE_INT
)
4758 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4759 if ((((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
) == 0)
4760 return (nonzero
== 1 || nonzero
== 0 ? bitwidth
4761 : bitwidth
- floor_log2 (nonzero
) - 1);
4764 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4765 known_x
, known_mode
, known_ret
);
4766 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4767 known_x
, known_mode
, known_ret
);
4768 result
= MAX (1, MIN (num0
, num1
) - 1);
4773 /* The number of bits of the product is the sum of the number of
4774 bits of both terms. However, unless one of the terms if known
4775 to be positive, we must allow for an additional bit since negating
4776 a negative number can remove one sign bit copy. */
4778 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4779 known_x
, known_mode
, known_ret
);
4780 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4781 known_x
, known_mode
, known_ret
);
4783 result
= bitwidth
- (bitwidth
- num0
) - (bitwidth
- num1
);
4785 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4786 || (((nonzero_bits (XEXP (x
, 0), mode
)
4787 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4788 && ((nonzero_bits (XEXP (x
, 1), mode
)
4789 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1)))
4793 return MAX (1, result
);
4796 /* The result must be <= the first operand. If the first operand
4797 has the high bit set, we know nothing about the number of sign
4799 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4801 else if ((nonzero_bits (XEXP (x
, 0), mode
)
4802 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4805 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4806 known_x
, known_mode
, known_ret
);
4809 /* The result must be <= the second operand. If the second operand
4810 has (or just might have) the high bit set, we know nothing about
4811 the number of sign bit copies. */
4812 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4814 else if ((nonzero_bits (XEXP (x
, 1), mode
)
4815 & ((unsigned HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4818 return cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4819 known_x
, known_mode
, known_ret
);
    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;
    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;
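      /* For example, (ashiftrt X (const_int 3)) duplicates the sign bit
	 three more times: if X has num0 == 5 known sign-bit copies, the
	 result has MIN (bitwidth, 5 + 3) == 8.  */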
    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));
    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);
    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
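      /* With the common STORE_FLAG_VALUE of 1, nonzero stays 1 and this
	 returns bitwidth - floor_log2 (1) - 1 == bitwidth - 1: a
	 comparison result is known to be 0 or 1, so all but the lowest
	 bit are sign-bit copies.  */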
    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  enum machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}
#endif
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;
      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;
      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
	case GE:
	  if ((const_val & max_val)
	      != ((unsigned HOST_WIDE_INT) 1
		  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
	  break;

	default:
	  break;
	}
    }
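  /* For example, (le (reg:SI r) (const_int 4)) becomes
     (lt (reg:SI r) (const_int 5)), and (geu (reg:SI r) (const_int 4))
     becomes (gtu (reg:SI r) (const_int 3)); the guards above skip the
     extreme values for which adjusting the constant would wrap.  */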
#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;
#endif

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}
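/* For example, for a jump whose pattern is
   (set (pc) (if_then_else (ne (reg:SI r) (const_int 0))
			   (label_ref L) (pc)))
   this returns (ne (reg:SI r) (const_int 0)); had the label been in the
   else arm instead, the condition would first be reversed.  */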
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
	 mode = GET_MODE_WIDER_MODE (mode))
      {
	enum machine_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode) == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
	  {
	    enum machine_mode wider = GET_MODE_WIDER_MODE (i);

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
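/* For example, on a target whose TARGET_MODE_REP_EXTENDED (QImode, HImode)
   is SIGN_EXTEND, num_sign_bit_copies_in_rep[HImode][QImode] becomes
   GET_MODE_PRECISION (HImode) - GET_MODE_PRECISION (QImode): all bits of
   the HImode representation above the QImode value must be copies of the
   QImode sign bit.  */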
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
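/* For example, low_bitmask_len (SImode, 0xff) is 8, since 0xff + 1 is a
   power of two, while low_bitmask_len (SImode, 0xf0) is -1 because the
   field does not start at bit 0.  */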
/* Return the mode of MEM's address.  */

enum machine_mode
get_address_mode (rtx mem)
{
  enum machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return mode;
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}
/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      REAL_VALUE_TYPE r;
      long l[2];
      REAL_VALUE_FROM_CONST_DOUBLE (r, value);

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (r, l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
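/* Worked example for the CONST_INT case: with a 64-bit HOST_WIDE_INT and
   32-bit words, splitting (const_int 0x100000002) gives low == 2 and
   high == 1; on a little-endian target *FIRST is (const_int 2) and
   *SECOND is (const_int 1), and the two are swapped when
   WORDS_BIG_ENDIAN.  */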
/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      enum machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));

      return (pos == (BITS_BIG_ENDIAN ? GET_MODE_PRECISION (mode) - len : 0));
    }
  return false;
}
/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
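/* For example, given the aligning address
   (and:SI (plus:SI (reg:SI r) (const_int 14)) (const_int -8))
   this returns a pointer to the inner PLUS and, if requested, stores
   AND (the last mutation stripped) in *OUTER_CODE.  */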
/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}
/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG)
    return inner;
  return 0;
}
/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG)
    return inner;
  return 0;
}
/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}
/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}
/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}
/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}
/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}
/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}
/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, enum machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
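/* For example, (plus:SI (reg:SI r1) (const_int 8)) is decomposed with the
   CONST_INT as the displacement and r1 as the base, while
   (plus:SI (reg:SI r1) (mult:SI (reg:SI r2) (const_int 4))) yields r1 as
   the base and the MULT as a scaled index.  */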
/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, enum machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}
5902 decompose_lea_address (struct address_info
*info
, rtx
*loc
)
5904 decompose_address (info
, loc
, VOIDmode
, ADDR_SPACE_GENERIC
, ADDRESS
);
/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}
/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return (HOST_WIDE_INT) 1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
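/* For example, get_index_scale returns 4 for both
   (mult:SI (reg:SI r) (const_int 4)) and
   (ashift:SI (reg:SI r) (const_int 2)), and 1 when the index is a bare
   register.  */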
/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}
/* Return 1 if *X is a thread-local symbol.  */

static int
tls_referenced_p_1 (rtx *x, void *)
{
  return GET_CODE (*x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*x) != 0;
}

/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (rtx x)
{
  if (!targetm.have_tls)
    return false;

  return for_each_rtx (&x, &tls_referenced_p_1, 0);
}