/* Analyze RTL for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, enum machine_mode,
                                                   const_rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, enum machine_mode,
                                             const_rtx, enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, enum machine_mode,
                                                const_rtx, enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, enum machine_mode,
                                          const_rtx, enum machine_mode,
                                          unsigned int);

/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];

/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
        return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_unstable_p (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_unstable_p (XVECEXP (x, i, j)))
            return 1;
      }

  return 0;
}

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

int
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      if (x == pic_offset_table_rtx
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */
          && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
        return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }

  return 0;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size,
                       enum machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);

  if (STRICT_ALIGNMENT
      && unaligned_mems
      && GET_MODE_SIZE (mode) != 0)
    {
      HOST_WIDE_INT actual_offset = offset;
#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
         the real alignment of %sp.  However, when it does this, the
         alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
          && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
        actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (actual_offset % GET_MODE_SIZE (mode) != 0)
        return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
        return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x))
        {
          tree decl;
          HOST_WIDE_INT decl_size;

          if (offset < 0)
            return 1;
          if (size == 0)
            size = GET_MODE_SIZE (mode);
          if (size == 0)
            return offset != 0;

          /* If the size of the access or of the symbol is unknown,
             assume the worst.  */
          decl = SYMBOL_REF_DECL (x);

          /* Else check that the access is in bounds.  TODO: restructure
             expr_size/tree_expr_size/int_expr_size and just use the latter.  */
          if (!decl)
            decl_size = -1;
          else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
            decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0)
                         ? tree_low_cst (DECL_SIZE_UNIT (decl), 0)
                         : -1);
          else if (TREE_CODE (decl) == STRING_CST)
            decl_size = TREE_STRING_LENGTH (decl);
          else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
            decl_size = int_size_in_bytes (TREE_TYPE (decl));
          else
            decl_size = -1;

          return (decl_size <= 0 ? offset != 0 : offset + size > decl_size);
        }

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return 0;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
         - it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
        return 0;

      /* - or it is an address that can't trap plus a constant integer,
           with the proper remainder modulo the mode size if we are
           considering unaligned memory references.  */
      if (CONST_INT_P (XEXP (x, 1))
          && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
                                     size, mode, unaligned_mems))
        return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
                                    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
                                    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, 0, VOIDmode, false);
}

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
        return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)
        return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      if (CONST_INT_P (XEXP (x, 1)))
        return nonzero_address_p (XEXP (x, 0));
      /* Handle PIC references.  */
      else if (XEXP (x, 0) == pic_offset_table_rtx
               && CONSTANT_P (XEXP (x, 1)))
        return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
          && INTVAL (XEXP (x, 1)) > 0)
        return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (rtx_addr_varies_p (XEXP (x, i), for_alias))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
            return 1;
      }
  return 0;
}

/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
           && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}

/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
          && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
        return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
        return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
          < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
        {
          *base_out = XEXP (x, 0);
          *offset_out = XEXP (x, 1);
          return;
        }
    }
  *base_out = x;
  *offset_out = const0_rtx;
}

/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
        count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
        return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          count += count_occurrences (XEXP (x, i), find, count_dest);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
          break;
        }
    }
  return count;
}

/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

      /* These are kept unique for a given value.  */
    case CONST_INT:
    case CONST_VECTOR:
    case CONST_DOUBLE:
    case CONST_FIXED:
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
        return 1;
    }

  return 0;
}

/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const_rtx beg, const_rtx end)
{
  rtx p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}

/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
        return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))
        return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
          return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))
          return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
          return 1;
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
        return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}

/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
{
  rtx insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Internals of reg_set_between_p.  */
int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
          || (CALL_P (insn)
              && ((REG_P (reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
                                               GET_MODE (reg), REGNO (reg)))
                  || MEM_P (reg)
                  || find_reg_fusage (insn, CLOBBER, reg)))))
    return 1;

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const_rtx start, const_rtx end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx insn;

  if (start == end)
    return 0;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))
          return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
            return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
        return 1;
      if (MEM_READONLY_P (x))
        return 0;
      if (memory_modified_in_insn_p (x, insn))
        return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
        return 1;

      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
            return 1;
    }

  return 0;
}

/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *) data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const_rtx insn, HARD_REG_SET *pset)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    IOR_HARD_REG_SET (*pset, call_used_reg_set);
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* A for_each_rtx subroutine of record_hard_reg_uses.  */
static int
record_hard_reg_uses_1 (rtx *px, void *data)
{
  rtx x = *px;
  HARD_REG_SET *pused = (HARD_REG_SET *) data;

  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    {
      int nregs = hard_regno_nregs[REGNO (x)][GET_MODE (x)];
      while (nregs-- > 0)
        SET_HARD_REG_BIT (*pused, REGNO (x) + nregs);
    }
  return 0;
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  for_each_rtx (px, record_hard_reg_uses_1, data);
}

/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const_rtx insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
            {
            case USE:
            case CLOBBER:
              break;

            case SET:
              /* We can consider insns having multiple sets, where all
                 but one are dead as single set insns.  In the common case
                 only a single set is present in the pattern so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach a set the first time, we just expect this is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (set)
                {
                  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                      && !side_effects_p (set))
                    set = NULL;
                  else
                    set_verified = 0;
                }
              if (!set)
                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
                return 0;
              break;

            default:
              return 0;
            }
        }
    }

  return set;
}

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
          {
            /* If we have already found a SET, then return now.  */
            if (found)
              return 1;
            else
              found = 1;
          }
    }

  /* Either zero or one SET.  */
  return 0;
}

/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
        return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
}

/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

int
noop_move_p (const_rtx insn)
{
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
    return 1;

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
    return 0;

  if (GET_CODE (pat) == SET && set_noop_p (pat))
    return 1;

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)
            continue;

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
            return 0;
        }

      return 1;
    }
  return 0;
}

/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

rtx
find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
{
  rtx p;

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
       p = PREV_INSN (p))
    if (INSN_P (p))
      {
        rtx set = single_set (p);
        rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

        if (set && rtx_equal_p (x, SET_DEST (set)))
          {
            rtx src = SET_SRC (set);

            if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
              src = XEXP (note, 0);

            if ((valid_to == NULL_RTX
                 || ! modified_between_p (src, PREV_INSN (p), valid_to))
                /* Reject hard registers because we don't usually want
                   to use them; we'd rather use a pseudo.  */
                && (! (REG_P (src)
                       && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
              {
                *pinsn = p;
                return src;
              }
          }

        /* If set in non-simple way, we don't have a value.  */
        if (reg_set_p (x, p))
          break;
      }

  return x;
}

/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

int
refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
                   rtx *loc)
{
  int i;
  unsigned int x_regno;
  RTX_CODE code;
  const char *fmt;

 repeat:
  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
  if (x == 0)
    return 0;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
      x_regno = REGNO (x);

      /* If we modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
#endif
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
        return 1;

      return endregno > x_regno && regno < END_REGNO (x);

    case SUBREG:
      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
        {
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? subreg_nregs (x) : 1);

          return endregno > inner_regno && regno < inner_endregno;
        }
      break;

    case CLOBBER:
    case SET:
      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
        return 1;

      if (code == CLOBBER || loc == &SET_SRC (x))
        return 0;
      x = SET_SRC (x);
      goto repeat;

    default:
      break;
    }

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
        {
          if (i == 0)
            {
              x = XEXP (x, 0);
              goto repeat;
            }
          else
            if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
              return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
              return 1;
        }
    }
  return 0;
}

/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case).  */

int
reg_overlap_mentioned_p (const_rtx x, const_rtx in)
{
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))
    return 0;

 recurse:
  switch (GET_CODE (x))
    {
    case STRICT_LOW_PART:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      /* Overly conservative.  */
      x = XEXP (x, 0);
      goto recurse;

    case SUBREG:
      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);
      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? subreg_nregs (x) : 1);
      goto do_reg;

    case REG:
      regno = REGNO (x);
      endregno = END_REGNO (x);
    do_reg:
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

    case MEM:
      {
        const char *fmt;
        int i;

        if (MEM_P (in))
          return 1;

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
          if (fmt[i] == 'e')
            {
              if (reg_overlap_mentioned_p (x, XEXP (in, i)))
                return 1;
            }
          else if (fmt[i] == 'E')
            {
              int j;
              for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
                  return 1;
            }

        return 0;
      }

    case SCRATCH:
    case PC:
    case CC0:
      return reg_mentioned_p (x, in);

    case PARALLEL:
      {
        int i;

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
            return 1;
        return 0;
      }

    default:
      gcc_assert (CONSTANT_P (x));
      return 0;
    }
}

/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).  DATA is an arbitrary pointer,
   ignored by note_stores, but passed to FUN.

   FUN receives three arguments:
   1. the REG, MEM, CC0 or PC being stored in or clobbered,
   2. the SET or CLOBBER rtx that does the store,
   3. the pointer DATA provided to note_stores.

  If the item being stored in or clobbered is a SUBREG of a hard register,
  the SUBREG will be passed.  */

void
note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
{
  int i;

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
    {
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
        {
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
        }
      else
        (*fun) (dest, x, data);
    }

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
}

/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

void
note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
{
  rtx body = *pbody;
  int i;

  switch (GET_CODE (body))
    {
    case COND_EXEC:
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);
      return;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);
      return;

    case SEQUENCE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
      return;

    case USE:
      (*fun) (&XEXP (body, 0), data);
      return;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
      return;

    case TRAP_IF:
      (*fun) (&TRAP_CONDITION (body), data);
      return;

    case PREFETCH:
      (*fun) (&XEXP (body, 0), data);
      return;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);
      return;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);
      return;

    case SET:
      {
        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
          {
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);
          }

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

        if (MEM_P (dest))
          (*fun) (&XEXP (dest, 0), data);
      }
      return;

    default:
      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
      return;
    }
}

/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed.

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

int
dead_or_set_p (const_rtx insn, const_rtx x)
{
  unsigned int regno, end_regno;
  unsigned int i;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)
    return 1;

  gcc_assert (REG_P (x));

  regno = REGNO (x);
  end_regno = END_REGNO (x);
  for (i = regno; i < end_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
      return 0;

  return 1;
}

/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

static bool
covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
{
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  if (!REG_P (dest))
    return false;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  return (test_regno >= regno && test_regno < endregno);
}

/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

static bool
covers_regno_p (const_rtx dest, unsigned int test_regno)
{
  if (GET_CODE (dest) == PARALLEL)
    {
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        {
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))
            return true;
        }

      return false;
    }
  else
    return covers_regno_no_parallel_p (dest, test_regno);
}

/* Utility function for dead_or_set_p to check an individual register.  */

int
dead_or_set_regno_p (const_rtx insn, unsigned int test_regno)
{
  const_rtx pattern;

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))
    return 1;

  if (CALL_P (insn)
      && find_regno_fusage (insn, CLOBBER, test_regno))
    return 1;

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == COND_EXEC)
    pattern = COND_EXEC_CODE (pattern);

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
        {
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
            return 1;
        }
    }

  return 0;
}

/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

rtx
find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
{
  rtx link;

  gcc_checking_assert (insn);

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;
  if (datum == 0)
    {
      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)
          return link;
      return 0;
    }

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
      return link;
  return 0;
}

/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

rtx
find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
{
  rtx link;

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && END_REGNO (XEXP (link, 0)) > regno)
      return link;
  return 0;
}

/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

rtx
find_reg_equal_equiv_note (const_rtx insn)
{
  rtx link;

  if (!INSN_P (insn))
    return 0;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
      {
        /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
           insns that have multiple sets.  Checking single_set to
           make sure of this is not the proper check, as explained
           in the comment in set_unique_reg_note.

           This should be changed into an assert.  */
        if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
          return 0;
        return link;
      }
  return NULL;
}

/* Check whether INSN is a single_set whose source is known to be
   equivalent to a constant.  Return that constant if so, otherwise
   return null.  */

rtx
find_constant_src (const_rtx insn)
{
  rtx note, set, x;

  set = single_set (insn);
  if (set)
    {
      x = avoid_constant_pool_reference (SET_SRC (set));
      if (CONSTANT_P (x))
        return x;
    }

  note = find_reg_equal_equiv_note (insn);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);

  return NULL_RTX;
}

/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
{
  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
  if (!CALL_P (insn))
    return 0;

  gcc_assert (datum);

  if (!REG_P (datum))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
          return 1;
    }
  else
    {
      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
        {
          unsigned int end_regno = END_HARD_REGNO (datum);
          unsigned int i;

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
              return 1;
        }
    }

  return 0;
}

/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

int
find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
{
  rtx link;

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER
      || !CALL_P (insn))
    return 0;

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
    {
      rtx op, reg;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && REGNO (reg) <= regno
          && END_HARD_REGNO (reg) > regno)
        return 1;
    }

  return 0;
}

/* Allocate a register note with kind KIND and datum DATUM.  LIST is
   stored as the pointer to the next register note.  */

rtx
alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
{
  rtx note;

  switch (kind)
    {
    case REG_CC_SETTER:
    case REG_CC_USER:
    case REG_LABEL_TARGET:
    case REG_LABEL_OPERAND:
      /* These types of register notes use an INSN_LIST rather than an
         EXPR_LIST, so that copying is done right and dumps look
         better.  */
      note = alloc_INSN_LIST (datum, list);
      PUT_REG_NOTE_KIND (note, kind);
      break;

    default:
      note = alloc_EXPR_LIST (kind, datum, list);
      break;
    }

  return note;
}

/* Add register note with kind KIND and datum DATUM to INSN.  */

void
add_reg_note (rtx insn, enum reg_note kind, rtx datum)
{
  REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
}

/* Remove register note NOTE from the REG_NOTES of INSN.  */

void
remove_note (rtx insn, const_rtx note)
{
  rtx link;

  if (note == NULL_RTX)
    return;

  if (REG_NOTES (insn) == note)
    REG_NOTES (insn) = XEXP (note, 1);
  else
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 1) == note)
        {
          XEXP (link, 1) = XEXP (note, 1);
          break;
        }

  switch (REG_NOTE_KIND (note))
    {
    case REG_EQUAL:
    case REG_EQUIV:
      df_notes_rescan (insn);
      break;
    default:
      break;
    }
}

/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.  */

void
remove_reg_equal_equiv_notes (rtx insn)
{
  rtx *loc;

  loc = &REG_NOTES (insn);
  while (*loc)
    {
      enum reg_note kind = REG_NOTE_KIND (*loc);
      if (kind == REG_EQUAL || kind == REG_EQUIV)
        *loc = XEXP (*loc, 1);
      else
        loc = &XEXP (*loc, 1);
    }
}

/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */

void
remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
{
  df_ref eq_use;

  /* This loop is a little tricky.  We cannot just go down the chain because
     it is being modified by some actions in the loop.  So we just iterate
     over the head.  We plan to drain the list anyway.  */
  while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
    {
      rtx insn = DF_REF_INSN (eq_use);
      rtx note = find_reg_equal_equiv_note (insn);

      /* This assert is generally triggered when someone deletes a REG_EQUAL
         or REG_EQUIV note by hacking the list manually rather than calling
         remove_note.  */
      gcc_assert (note);

      remove_note (insn, note);
    }
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

int
in_expr_list_p (const_rtx listp, const_rtx node)
{
  const_rtx x;

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))
      return 1;

  return 0;
}

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

void
remove_node_from_expr_list (const_rtx node, rtx *listp)
{
  rtx temp = *listp;
  rtx prev = NULL_RTX;

  while (temp)
    {
      if (node == XEXP (temp, 0))
        {
          /* Splice the node out of the list.  */
          if (prev)
            XEXP (prev, 1) = XEXP (temp, 1);
          else
            *listp = XEXP (temp, 1);

          return;
        }

      prev = temp;
      temp = XEXP (temp, 1);
    }
}

/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no
   instructions should be moved or combined across them.  This includes
   only volatile asms and UNSPEC_VOLATILE instructions.  */

int
volatile_insn_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case CALL:
    case MEM:
      return 0;

    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_insn_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

int
volatile_refs_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case CLOBBER:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 0;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *const fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (volatile_refs_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

int
side_effects_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CC0:
    case PC:
    case REG:
    case SCRATCH:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
      return 0;

    case CLOBBER:
      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case CALL:
    case UNSPEC_VOLATILE:
      /* case TRAP_IF: This isn't clear yet.  */
      return 1;

    case MEM:
    case ASM_INPUT:
    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
        return 1;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          {
            if (side_effects_p (XEXP (x, i)))
              return 1;
          }
        else if (fmt[i] == 'E')
          {
            int j;
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
                return 1;
          }
      }
  }
  return 0;
}

/* Return nonzero if evaluating rtx X might cause a trap.
   FLAGS controls how to consider MEMs.  A nonzero means the context
   of the access may have changed from the original, such that the
   address may have become invalid.  */

int
may_trap_p_1 (const_rtx x, unsigned flags)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  /* We make no distinction currently, but this function is part of
     the internal target-hooks ABI so we keep the parameter as
     "unsigned flags".  */
  bool code_changed = flags != 0;

  if (x == 0)
    return 0;
  code = GET_CODE (x);
  switch (code)
    {
      /* Handle these cases quickly.  */
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case CONST:
    case PC:
    case CC0:
    case REG:
    case SCRATCH:
      return 0;

    case UNSPEC:
    case UNSPEC_VOLATILE:
      return targetm.unspec_may_trap_p (x, flags);

    case ASM_INPUT:
    case TRAP_IF:
      return 1;

    case ASM_OPERANDS:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
    case MEM:
      /* Recognize specific pattern of stack checking probes.  */
      if (flag_stack_check
          && MEM_VOLATILE_P (x)
          && XEXP (x, 0) == stack_pointer_rtx)
        return 1;
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of context such as when moving code
             when optimizing, might cause its address to become invalid.  */
          code_changed
          || !MEM_NOTRAP_P (x))
        {
          HOST_WIDE_INT size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : 0;
          return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
                                        GET_MODE (x), code_changed);
        }
      return 0;

      /* Division by a non-constant might trap.  */
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
        return 1;
      break;

    case EXPR_LIST:
      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */
      return 1;

    case GE:
    case GT:
    case LE:
    case LT:
    case LTGT:
    case COMPARE:
      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
        break;
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
        return 1;
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
          || HONOR_NANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case EQ:
    case NE:
      if (HONOR_SNANS (GET_MODE (x)))
        return 1;
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
          || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
        return 1;
      break;

    case FIX:
      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
        return 1;
      break;

    case NEG:
    case ABS:
    case SUBREG:
      /* These operations don't trap even with floating point.  */
      break;

    default:
      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
          && flag_trapping_math)
        return 1;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (may_trap_p_1 (XEXP (x, i), flags))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
              return 1;
        }
    }
  return 0;
}

/* Return nonzero if evaluating rtx X might cause a trap.  */

int
may_trap_p (const_rtx x)
{
  return may_trap_p_1 (x, 0);
}

/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      if ((reg:SI) == 1)
        (set (reg:SI) (mem:SI (%fp - 7)))
      else
        (set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

int
may_trap_or_fault_p (const_rtx x)
{
  return may_trap_p_1 (x, 1);
}

/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

int
inequality_comparisons_p (const_rtx x)
{
  const char *fmt;
  int len, i;
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case REG:
    case SCRATCH:
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      return 0;

    case LT:
    case LTU:
    case GT:
    case GTU:
    case LE:
    case LEU:
    case GE:
    case GEU:
      return 1;

    default:
      break;
    }

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          if (inequality_comparisons_p (XEXP (x, i)))
            return 1;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
              return 1;
        }
    }

  return 0;
}

/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

rtx
replace_rtx (rtx x, rtx from, rtx to)
{
  int i, j;
  const char *fmt;

  /* The following prevents loops occurrence when we change MEM in
     CONST_DOUBLE onto the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  if (x == from)
    return to;

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_subreg (GET_MODE (x), new_rtx,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          gcc_assert (x);
        }
      else
        SUBREG_REG (x) = new_rtx;

      return x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new_rtx = replace_rtx (XEXP (x, 0), from, to);

      if (CONST_INT_P (new_rtx))
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new_rtx, GET_MODE (XEXP (x, 0)));
          gcc_assert (x);
        }
      else
        XEXP (x, 0) = new_rtx;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
    }

  return x;
}

/* Replace occurrences of the old label in *X with the new one.
   DATA is a REPLACE_LABEL_DATA containing the old and new labels.  */

int
replace_label (rtx *x, void *data)
{
  rtx l = *x;
  rtx old_label = ((replace_label_data *) data)->r1;
  rtx new_label = ((replace_label_data *) data)->r2;
  bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;

  if (l == NULL_RTX)
    return 0;

  if (GET_CODE (l) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (l))
    {
      rtx c = get_pool_constant (l);
      if (rtx_referenced_p (old_label, c))
        {
          rtx new_c, new_l;
          replace_label_data *d = (replace_label_data *) data;

          /* Create a copy of constant C; replace the label inside
             but do not update LABEL_NUSES because uses in constant pool
             are not counted.  */
          new_c = copy_rtx (c);
          d->update_label_nuses = false;
          for_each_rtx (&new_c, replace_label, data);
          d->update_label_nuses = update_label_nuses;

          /* Add the new constant NEW_C to constant pool and replace
             the old reference to constant by new reference.  */
          new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
          *x = replace_rtx (l, l, new_l);
        }
      return 0;
    }

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by for_each_rtx because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
    JUMP_LABEL (l) = new_label;

  if ((GET_CODE (l) == LABEL_REF
       || GET_CODE (l) == INSN_LIST)
      && XEXP (l, 0) == old_label)
    {
      XEXP (l, 0) = new_label;
      if (update_label_nuses)
        {
          ++LABEL_NUSES (new_label);
          --LABEL_NUSES (old_label);
        }
      return 0;
    }

  return 0;
}

/* When *BODY is equal to X or X is directly referenced by *BODY
   return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero
   too, otherwise FOR_EACH_RTX continues traversing *BODY.  */

static int
rtx_referenced_p_1 (rtx *body, void *x)
{
  rtx y = (rtx) x;

  if (*body == NULL_RTX)
    return y == NULL_RTX;

  /* Return true if a label_ref *BODY refers to label Y.  */
  if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
    return XEXP (*body, 0) == y;

  /* If *BODY is a reference to pool constant traverse the constant.  */
  if (GET_CODE (*body) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (*body))
    return rtx_referenced_p (y, get_pool_constant (*body));

  /* By default, compare the RTL expressions.  */
  return rtx_equal_p (*body, y);
}

/* Return true if X is referenced in BODY.  */

int
rtx_referenced_p (rtx x, rtx body)
{
  return for_each_rtx (&body, rtx_referenced_p_1, x);
}

/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

bool
tablejump_p (const_rtx insn, rtx *labelp, rtx *tablep)
{
  rtx label, table;

  if (!JUMP_P (insn))
    return false;

  label = JUMP_LABEL (insn);
  if (label != NULL_RTX && !ANY_RETURN_P (label)
      && (table = next_active_insn (label)) != NULL_RTX
      && JUMP_TABLE_DATA_P (table))
    {
      if (labelp)
        *labelp = label;
      if (tablep)
        *tablep = table;
      return true;
    }
  return false;
}

/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

static int
computed_jump_p_1 (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  int i, j;
  const char *fmt;

  switch (code)
    {
    case LABEL_REF:
    case PC:
      return 0;

    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case REG:
      return 1;

    case MEM:
      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

    case IF_THEN_ELSE:
      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e'
          && computed_jump_p_1 (XEXP (x, i)))
        return 1;

      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
            return 1;
    }

  return 0;
}

/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

int
computed_jump_p (const_rtx insn)
{
  int i;
  if (JUMP_P (insn))
    {
      rtx pat = PATTERN (insn);

      /* If we have a JUMP_LABEL set, we're not a computed jump.  */
      if (JUMP_LABEL (insn) != NULL)
        return 0;

      if (GET_CODE (pat) == PARALLEL)
        {
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
                    == LABEL_REF))
              has_use_labelref = 1;

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
                return 1;
        }
      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
        return 1;
    }
  return 0;
}
/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */

static int
for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
{
  int result, i, j;
  const char *format = GET_RTX_FORMAT (GET_CODE (exp));
  rtx *x;

  for (; format[n] != '\0'; n++)
    {
      switch (format[n])
	{
	case 'e':
	  /* Call F on X.  */
	  x = &XEXP (exp, n);
	  result = (*f) (x, data);
	  if (result == -1)
	    /* Do not traverse sub-expressions.  */
	    continue;
	  else if (result != 0)
	    /* Stop the traversal.  */
	    return result;

	  if (*x == NULL_RTX)
	    /* There are no sub-expressions.  */
	    continue;

	  i = non_rtx_starting_operands[GET_CODE (*x)];
	  if (i >= 0)
	    {
	      result = for_each_rtx_1 (*x, i, f, data);
	      if (result != 0)
		return result;
	    }
	  break;

	case 'V':
	case 'E':
	  if (XVEC (exp, n) == 0)
	    continue;
	  for (j = 0; j < XVECLEN (exp, n); ++j)
	    {
	      /* Call F on X.  */
	      x = &XVECEXP (exp, n, j);
	      result = (*f) (x, data);
	      if (result == -1)
		/* Do not traverse sub-expressions.  */
		continue;
	      else if (result != 0)
		/* Stop the traversal.  */
		return result;

	      if (*x == NULL_RTX)
		/* There are no sub-expressions.  */
		continue;

	      i = non_rtx_starting_operands[GET_CODE (*x)];
	      if (i >= 0)
		{
		  result = for_each_rtx_1 (*x, i, f, data);
		  if (result != 0)
		    return result;
		}
	    }
	  break;

	default:
	  /* Nothing to do.  */
	  break;
	}
    }

  return 0;
}
/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

int
for_each_rtx (rtx *x, rtx_function f, void *data)
{
  int result;
  int i;

  /* Call F on X.  */
  result = (*f) (x, data);
  if (result == -1)
    /* Do not traverse sub-expressions.  */
    return 0;
  else if (result != 0)
    /* Stop the traversal.  */
    return result;

  if (*x == NULL_RTX)
    /* There are no sub-expressions.  */
    return 0;

  i = non_rtx_starting_operands[GET_CODE (*x)];
  if (i < 0)
    return 0;

  return for_each_rtx_1 (*x, i, f, data);
}
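
/* Illustration only (not part of the original source): a minimal
   for_each_rtx callback.  This hypothetical helper counts MEM
   expressions and returns -1 for each one so that the traversal does
   not descend into their addresses:

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (*x != NULL_RTX && MEM_P (*x))
	 {
	   ++*(int *) data;
	   return -1;
	 }
       return 0;
     }

   Called as "for_each_rtx (&pat, count_mems_1, &n);" it leaves N
   holding the number of MEMs found outside other MEMs' addresses.  */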
/* Data structure that holds the internal state communicated between
   for_each_inc_dec, for_each_inc_dec_find_mem and
   for_each_inc_dec_find_inc_dec.  */

struct for_each_inc_dec_ops
{
  /* The function to be called for each autoinc operation found.  */
  for_each_inc_dec_fn fn;
  /* The opaque argument to be passed to it.  */
  void *arg;
  /* The MEM we're visiting, if any.  */
  rtx mem;
};

static int for_each_inc_dec_find_mem (rtx *r, void *d);
/* Find PRE/POST-INC/DEC/MODIFY operations within *R, extract the
   operands of the equivalent add insn and pass the result to the
   operator specified by *D.  */

static int
for_each_inc_dec_find_inc_dec (rtx *r, void *d)
{
  rtx x = *r;
  struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *)d;

  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
      {
	int size = GET_MODE_SIZE (GET_MODE (data->mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (size, GET_MODE (r1));
	return data->fn (data->mem, x, r1, r1, c, data->arg);
      }

    case PRE_DEC:
    case POST_DEC:
      {
	int size = GET_MODE_SIZE (GET_MODE (data->mem));
	rtx r1 = XEXP (x, 0);
	rtx c = gen_int_mode (-size, GET_MODE (r1));
	return data->fn (data->mem, x, r1, r1, c, data->arg);
      }

    case PRE_MODIFY:
    case POST_MODIFY:
      {
	rtx r1 = XEXP (x, 0);
	rtx add = XEXP (x, 1);
	return data->fn (data->mem, x, r1, add, NULL, data->arg);
      }

    case MEM:
      {
	rtx save = data->mem;
	int ret = for_each_inc_dec_find_mem (r, d);
	data->mem = save;
	return ret;
      }

    default:
      return 0;
    }
}
/* If *R is a MEM, find PRE/POST-INC/DEC/MODIFY operations within its
   address, extract the operands of the equivalent add insn and pass
   the result to the operator specified by *D.  */

static int
for_each_inc_dec_find_mem (rtx *r, void *d)
{
  rtx x = *r;
  if (x != NULL_RTX && MEM_P (x))
    {
      struct for_each_inc_dec_ops *data = (struct for_each_inc_dec_ops *) d;
      int result;

      data->mem = x;

      result = for_each_rtx (&XEXP (x, 0), for_each_inc_dec_find_inc_dec,
			     data);
      if (result)
	return result;

      return -1;
    }
  return 0;
}
/* Traverse *X looking for MEMs, and for autoinc operations within
   them.  For each such autoinc operation found, call FN, passing it
   the innermost enclosing MEM, the operation itself, the RTX modified
   by the operation, two RTXs (the second may be NULL) that, once
   added, represent the value to be held by the modified RTX
   afterwards, and ARG.  FN is to return -1 to skip looking for other
   autoinc operations within the visited operation, 0 to continue the
   traversal, or any other value to have it returned to the caller of
   for_each_inc_dec.  */

int
for_each_inc_dec (rtx *x,
		  for_each_inc_dec_fn fn,
		  void *arg)
{
  struct for_each_inc_dec_ops data;

  data.fn = fn;
  data.arg = arg;
  data.mem = NULL;

  return for_each_rtx (x, for_each_inc_dec_find_mem, &data);
}
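
/* Illustration only (not part of the original source): a hypothetical
   FN suitable for for_each_inc_dec.  For (mem:SI (post_inc (reg R)))
   it would be called with DEST == SRC == R and SRC_CONST ==
   (const_int 4), i.e. the equivalent add is R := R + 4:

     static int
     note_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		   rtx dest, rtx src, rtx src_const, void *arg)
     {
       ...  record that DEST becomes SRC plus SRC_CONST (if nonnull)  ...
       return 0;
     }

   Returning -1 instead would skip further autoinc searching within
   the visited operation, as described above.  */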
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

rtx
regno_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && REGNO (x) == regno)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if ((tem = regno_use_in (regno, XEXP (x, i))))
	    return tem;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
	    return tem;
    }

  return NULL_RTX;
}
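
/* Illustration only (not part of the original source): one plausible
   use is replacing every use of a hard register inside a pattern,
   e.g.

     rtx use;
     while ((use = regno_use_in (REGNO (old_reg), pat)) != NULL_RTX)
       validate_replace_rtx (use, new_reg, insn);

   validate_replace_rtx is named here only as a plausible caller; the
   exact replacement strategy is up to the pass.  */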
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

int
commutative_operand_precedence (rtx op)
{
  enum rtx_code code = GET_CODE (op);

  /* Constants always come second.  Prefer "nice" constants.  */
  if (code == CONST_INT)
    return -8;
  if (code == CONST_DOUBLE)
    return -7;
  if (code == CONST_FIXED)
    return -7;
  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_CONST_OBJ:
      if (code == CONST_INT)
	return -6;
      if (code == CONST_DOUBLE)
	return -5;
      if (code == CONST_FIXED)
	return -5;
      return -4;

    case RTX_EXTRA:
      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
	return -3;
      return 0;

    case RTX_OBJ:
      /* Complex expressions should be the first, so decrease priority
	 of objects.  Prefer pointer objects over non pointer objects.  */
      if ((REG_P (op) && REG_POINTER (op))
	  || (MEM_P (op) && MEM_POINTER (op)))
	return -1;
      return -2;

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
	 This helps to make things linear.  In particular,
	 (and (and (reg) (reg)) (not (reg))) is canonical.  */
      return 4;

    case RTX_BIN_ARITH:
      /* If only one operand is a binary expression, it will be the first
	 operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
	 is canonical, although it will usually be further simplified.  */
      return 2;

    case RTX_UNARY:
      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)
	return 1;

    default:
      return 0;
    }
}
/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

bool
swap_commutative_operands_p (rtx x, rtx y)
{
  return (commutative_operand_precedence (x)
	  < commutative_operand_precedence (y));
}
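
/* Illustration only (not part of the original source): callers
   typically use swap_commutative_operands_p to put commutative
   operands in canonical order, e.g.

     if (swap_commutative_operands_p (op0, op1))
       {
	 rtx tem = op0;
	 op0 = op1;
	 op1 = tem;
       }

   so that, for instance, a CONST_INT always ends up as the second
   operand of a PLUS.  */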
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

int
auto_inc_p (const_rtx x)
{
  switch (GET_CODE (x))
    {
    case PRE_INC:
    case POST_INC:
    case PRE_DEC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
	return 1;
    default:
      break;
    }
  return 0;
}
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

int
loc_mentioned_in_p (rtx *loc, const_rtx in)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (!in)
    return 0;

  code = GET_CODE (in);
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
	    return 1;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	  if (loc == &XVECEXP (in, i, j)
	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
	    return 1;
    }
  return 0;
}
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
	      enum machine_mode inner_mode,
	      unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
		   + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
		  && (subreg_byte % UNITS_PER_WORD
		      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
	    - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}
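
/* Illustration only (not part of the original source): on a
   little-endian target (WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN == 0)
   with 4-byte words, subreg_lsb_1 (SImode, DImode, 4) computes
   word = 4 / 4 = 1 and byte = 4 % 4 = 0, so the subreg begins at bit
   1 * BITS_PER_WORD = 32, i.e. it is the high part of the DImode
   value.  */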
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (const_rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
		       SUBREG_BYTE (x));
}
/* Fill in information about a subreg of a hard register.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   info   - Pointer to structure to fill in.  */

void
subreg_get_info (unsigned int xregno, enum machine_mode xmode,
		 unsigned int offset, enum machine_mode ymode,
		 struct subreg_info *info)
{
  int nregs_xmode, nregs_ymode;
  int mode_multiple, nregs_multiple;
  int offset_adj, y_offset, y_offset_adj;
  int regsize_xmode, regsize_ymode;
  bool rknown;

  gcc_assert (xregno < FIRST_PSEUDO_REGISTER);

  rknown = false;

  /* If there are holes in a non-scalar mode in registers, we expect
     that it is made up of its units concatenated together.  */
  if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
    {
      enum machine_mode xmode_unit;

      nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
      if (GET_MODE_INNER (xmode) == VOIDmode)
	xmode_unit = xmode;
      else
	xmode_unit = GET_MODE_INNER (xmode);
      gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
      gcc_assert (nregs_xmode
		  == (GET_MODE_NUNITS (xmode)
		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
      gcc_assert (hard_regno_nregs[xregno][xmode]
		  == (hard_regno_nregs[xregno][xmode_unit]
		      * GET_MODE_NUNITS (xmode)));

      /* You can only ask for a SUBREG of a value with holes in the middle
	 if you don't cross the holes.  (Such a SUBREG should be done by
	 picking a different register class, or doing it in memory if
	 necessary.)  An example of a value with holes is XCmode on 32-bit
	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
	 3 for each part, but in memory it's two 128-bit parts.
	 Padding is assumed to be at the end (not necessarily the 'high part')
	 of each unit.  */
      if ((offset / GET_MODE_SIZE (xmode_unit) + 1
	   < GET_MODE_NUNITS (xmode))
	  && (offset / GET_MODE_SIZE (xmode_unit)
	      != ((offset + GET_MODE_SIZE (ymode) - 1)
		  / GET_MODE_SIZE (xmode_unit))))
	{
	  info->representable_p = false;
	  rknown = true;
	}
    }
  else
    nregs_xmode = hard_regno_nregs[xregno][xmode];

  nregs_ymode = hard_regno_nregs[xregno][ymode];

  /* Paradoxical subregs are otherwise valid.  */
  if (!rknown
      && offset == 0
      && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
    {
      info->representable_p = true;
      /* If this is a big endian paradoxical subreg, which uses more
	 actual hard registers than the original register, we must
	 return a negative offset so that we find the proper highpart
	 of the register.  */
      if (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
	  ? REG_WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN)
	info->offset = nregs_xmode - nregs_ymode;
      else
	info->offset = 0;
      info->nregs = nregs_ymode;
      return;
    }

  /* If registers store different numbers of bits in the different
     modes, we cannot generally form this subreg.  */
  if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
      && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
      && (GET_MODE_SIZE (xmode) % nregs_xmode) == 0
      && (GET_MODE_SIZE (ymode) % nregs_ymode) == 0)
    {
      regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
      regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
      if (!rknown && regsize_xmode > regsize_ymode && nregs_ymode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
      if (!rknown && regsize_ymode > regsize_xmode && nregs_xmode > 1)
	{
	  info->representable_p = false;
	  info->nregs
	    = (GET_MODE_SIZE (ymode) + regsize_xmode - 1) / regsize_xmode;
	  info->offset = offset / regsize_xmode;
	  return;
	}
    }

  /* Lowpart subregs are otherwise valid.  */
  if (!rknown && offset == subreg_lowpart_offset (ymode, xmode))
    {
      info->representable_p = true;
      rknown = true;

      if (offset == 0 || nregs_xmode == nregs_ymode)
	{
	  info->offset = 0;
	  info->nregs = nregs_ymode;
	  return;
	}
    }

  /* This should always pass, otherwise we don't know how to verify
     the constraint.  These conditions may be relaxed but
     subreg_regno_offset would need to be redesigned.  */
  gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((nregs_xmode % nregs_ymode) == 0);

  if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN
      && GET_MODE_SIZE (xmode) > UNITS_PER_WORD)
    {
      HOST_WIDE_INT xsize = GET_MODE_SIZE (xmode);
      HOST_WIDE_INT ysize = GET_MODE_SIZE (ymode);
      HOST_WIDE_INT off_low = offset & (ysize - 1);
      HOST_WIDE_INT off_high = offset & ~(ysize - 1);
      offset = (xsize - ysize - off_high) | off_low;
    }
  /* The XMODE value can be seen as a vector of NREGS_XMODE
     values.  The subreg must represent a lowpart of given field.
     Compute what field it is.  */
  offset_adj = offset;
  offset_adj -= subreg_lowpart_offset (ymode,
				       mode_for_size (GET_MODE_BITSIZE (xmode)
						      / nregs_xmode,
						      MODE_INT, 0));

  /* Size of ymode must not be greater than the size of xmode.  */
  mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
  gcc_assert (mode_multiple != 0);

  y_offset = offset / GET_MODE_SIZE (ymode);
  y_offset_adj = offset_adj / GET_MODE_SIZE (ymode);
  nregs_multiple = nregs_xmode / nregs_ymode;

  gcc_assert ((offset_adj % GET_MODE_SIZE (ymode)) == 0);
  gcc_assert ((mode_multiple % nregs_multiple) == 0);

  if (!rknown)
    {
      info->representable_p
	= (!(y_offset_adj % (mode_multiple / nregs_multiple)));
      rknown = true;
    }
  info->offset = (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
  info->nregs = nregs_ymode;
}
/* This function returns the regno offset of a subreg expression.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - The regno offset which would be used.  */

unsigned int
subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
		     unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.offset;
}
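
/* Illustration only (not part of the original source): on a target
   where DImode occupies two consecutive SImode hard registers, the
   regno offset of (subreg:SI (reg:DI R) 4) on a little-endian machine
   is 1, so the subreg resolves to hard register R + 1; with byte
   offset 0 it resolves to R itself.  */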
/* This function returns true when the offset is representable via
   subreg_offset in the given regno.
   xregno - A regno of an inner hard subreg_reg (or what will become one).
   xmode  - The mode of xregno.
   offset - The byte offset.
   ymode  - The mode of a top level SUBREG (or what may become one).
   RETURN - Whether the offset is representable.  */

bool
subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
			       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  return info.representable_p;
}
/* Return the number of a YMODE register to which

       (subreg:YMODE (reg:XMODE XREGNO) OFFSET)

   can be simplified.  Return -1 if the subreg can't be simplified.

   XREGNO is a hard register number.  */

int
simplify_subreg_regno (unsigned int xregno, enum machine_mode xmode,
		       unsigned int offset, enum machine_mode ymode)
{
  struct subreg_info info;
  unsigned int yregno;

#ifdef CANNOT_CHANGE_MODE_CLASS
  /* Give the backend a chance to disallow the mode change.  */
  if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
      && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
      && REG_CANNOT_CHANGE_MODE_P (xregno, xmode, ymode))
    return -1;
#endif

  /* We shouldn't simplify stack-related registers.  */
  if ((!reload_completed || frame_pointer_needed)
      && xregno == FRAME_POINTER_REGNUM)
    return -1;

  if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && xregno == ARG_POINTER_REGNUM)
    return -1;

  if (xregno == STACK_POINTER_REGNUM)
    return -1;

  /* Try to get the register offset.  */
  subreg_get_info (xregno, xmode, offset, ymode, &info);
  if (!info.representable_p)
    return -1;

  /* Make sure that the offsetted register value is in range.  */
  yregno = xregno + info.offset;
  if (!HARD_REGISTER_NUM_P (yregno))
    return -1;

  /* See whether (reg:YMODE YREGNO) is valid.

     ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
     This is a kludge to work around how complex FP arguments are passed
     on IA-64 and should be fixed.  See PR target/49226.  */
  if (!HARD_REGNO_MODE_OK (yregno, ymode)
      && HARD_REGNO_MODE_OK (xregno, xmode))
    return -1;

  return (int) yregno;
}
/* Return the final regno that a subreg expression refers to.  */

unsigned int
subreg_regno (const_rtx x)
{
  unsigned int ret;
  rtx subreg = SUBREG_REG (x);
  int regno = REGNO (subreg);

  ret = regno + subreg_regno_offset (regno,
				     GET_MODE (subreg),
				     SUBREG_BYTE (x),
				     GET_MODE (x));
  return ret;
}
/* Return the number of registers that a subreg expression refers
   to.  */

unsigned int
subreg_nregs (const_rtx x)
{
  return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
}
/* Return the number of registers that a subreg REG with REGNO
   expression refers to.  This is a copy of the rtlanal.c:subreg_nregs
   changed so that the regno can be passed in.  */

unsigned int
subreg_nregs_with_regno (unsigned int regno, const_rtx x)
{
  struct subreg_info info;
  rtx subreg = SUBREG_REG (x);

  subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
		   &info);
  return info.nregs;
}
struct parms_set_data
{
  int nregs;
  HARD_REG_SET regs;
};

/* Helper function for noticing stores to parameter registers.  */

static void
parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct parms_set_data *const d = (struct parms_set_data *) data;
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
      && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
    {
      CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
      d->nregs--;
    }
}
/* Look backward for first parameter to be loaded.
   Note that loads of all parameters will not necessarily be
   found if CSE has eliminated some of them (e.g., an argument
   to the outer function is passed down as a parameter).
   Do not skip BOUNDARY.  */

rtx
find_first_parameter_load (rtx call_insn, rtx boundary)
{
  struct parms_set_data parm;
  rtx p, before, first_set;

  /* Since different machines initialize their parameter registers
     in different orders, assume nothing.  Collect the set of all
     parameter registers.  */
  CLEAR_HARD_REG_SET (parm.regs);
  parm.nregs = 0;
  for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
    if (GET_CODE (XEXP (p, 0)) == USE
	&& REG_P (XEXP (XEXP (p, 0), 0)))
      {
	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);

	/* We only care about registers which can hold function
	   arguments.  */
	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
	  continue;

	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
	parm.nregs++;
      }
  before = call_insn;
  first_set = call_insn;

  /* Search backward for the first set of a register in this set.  */
  while (parm.nregs && before != boundary)
    {
      before = PREV_INSN (before);

      /* It is possible that some loads got CSEed from one call to
	 another.  Stop in that case.  */
      if (CALL_P (before))
	break;

      /* Our caller must either ensure that we will find all sets
	 (in case the code has not been optimized yet), or take care
	 of possible labels by setting BOUNDARY to a preceding
	 CODE_LABEL.  */
      if (LABEL_P (before))
	{
	  gcc_assert (before == boundary);
	  break;
	}

      if (INSN_P (before))
	{
	  int nregs_old = parm.nregs;
	  note_stores (PATTERN (before), parms_set, &parm);
	  /* If we found something that did not set a parameter reg,
	     we're done.  Do not keep going, as that might result
	     in hoisting an insn before the setting of a pseudo
	     that is used by the hoisted insn.  */
	  if (nregs_old != parm.nregs)
	    first_set = before;
	  else
	    break;
	}
    }
  return first_set;
}
/* Return true if we should avoid inserting code between INSN and preceding
   call instruction.  */

bool
keep_with_call_p (const_rtx insn)
{
  rtx set;

  if (INSN_P (insn) && (set = single_set (insn)) != NULL)
    {
      if (REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
	  && fixed_regs[REGNO (SET_DEST (set))]
	  && general_operand (SET_SRC (set), VOIDmode))
	return true;
      if (REG_P (SET_SRC (set))
	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
	  && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	return true;
      /* There may be a stack pop just after the call and before the store
	 of the return register.  Search for the actual store when deciding
	 if we can break or not.  */
      if (SET_DEST (set) == stack_pointer_rtx)
	{
	  /* This CONST_CAST is okay because next_nonnote_insn just
	     returns its argument and we assign it to a const_rtx
	     variable.  */
	  const_rtx i2 = next_nonnote_insn (CONST_CAST_RTX(insn));
	  if (i2 && keep_with_call_p (i2))
	    return true;
	}
    }
  return false;
}
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

bool
label_is_jump_target_p (const_rtx label, const_rtx jump_insn)
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tablejump_p (jump_insn, NULL, &tmp))
    {
      rtvec vec = XVEC (PATTERN (tmp),
			GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
	  return true;
    }

  if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
    return true;

  return false;
}
/* Return an estimate of the cost of computing rtx X.
   One use is in cse, to decide which expression to keep in the hash table.
   Another is in rtl generation, to pick the cheapest way to multiply.
   Other uses like the latter are expected in the future.

   X appears as operand OPNO in an expression with code OUTER_CODE.
   SPEED specifies whether costs optimized for speed or size should
   be returned.  */

int
rtx_cost (rtx x, enum rtx_code outer_code, int opno, bool speed)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  int total;

  if (x == 0)
    return 0;

  /* Compute the default costs of certain things.
     Note that targetm.rtx_costs can override the defaults.  */

  code = GET_CODE (x);
  switch (code)
    {
    case MULT:
      total = COSTS_N_INSNS (5);
      break;
    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = COSTS_N_INSNS (7);
      break;
    case USE:
      /* Used in combine.c as a marker.  */
      total = 0;
      break;
    default:
      total = COSTS_N_INSNS (1);
    }

  switch (code)
    {
    case REG:
      return 0;

    case SUBREG:
      total = 0;
      /* If we can't tie these modes, make this expensive.  The larger
	 the mode, the more expensive it is.  */
      if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
	return COSTS_N_INSNS (2
			      + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
      break;

    default:
      if (targetm.rtx_costs (x, code, outer_code, opno, &total, speed))
	return total;
      break;
    }

  /* Sum the costs of the sub-rtx's, plus cost of this operation,
     which is already in total.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      total += rtx_cost (XEXP (x, i), code, i, speed);
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	total += rtx_cost (XVECEXP (x, i, j), code, i, speed);

  return total;
}
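
/* Illustration only (not part of the original source): expansion code
   often compares alternatives by cost, e.g. (assuming SEQ1 and SEQ2
   are single-set patterns):

     if (rtx_cost (SET_SRC (seq1), SET, 1, optimize_insn_for_speed_p ())
	 < rtx_cost (SET_SRC (seq2), SET, 1, optimize_insn_for_speed_p ()))
       ...  prefer SEQ1  ...

   The OUTER_CODE/OPNO pair lets the target cost hook account for the
   context in which X appears.  */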
/* Fill in the structure C with information about both speed and size rtx
   costs for X, which is operand OPNO in an expression with code OUTER.  */

void
get_full_rtx_cost (rtx x, enum rtx_code outer, int opno,
		   struct full_rtx_costs *c)
{
  c->speed = rtx_cost (x, outer, opno, true);
  c->size = rtx_cost (x, outer, opno, false);
}
/* Return cost of address expression X.
   Expect that X is a properly formed address reference.

   The SPEED parameter specifies whether costs optimized for speed or
   size should be returned.  */

int
address_cost (rtx x, enum machine_mode mode, addr_space_t as, bool speed)
{
  /* We may be asked for cost of various unusual addresses, such as operands
     of push instruction.  It is not worthwhile to complicate writing
     of the target hook by such cases.  */

  if (!memory_address_addr_space_p (mode, x, as))
    return 1000;

  return targetm.address_cost (x, speed);
}
/* If the target doesn't override, compute the cost as with arithmetic.  */

int
default_address_cost (rtx x, bool speed)
{
  return rtx_cost (x, MEM, 0, speed);
}
unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (const_rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
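
/* Illustration only (not part of the original source): for
   (sign_extend:DI (reg:SI R)) on a 64-bit target, num_sign_bit_copies
   in DImode is at least 64 - 32 + 1 = 33, since the extension
   replicates the SImode sign bit into the whole upper half.
   Conversely, nonzero_bits of (and:SI (reg:SI R) (const_int 255)) in
   SImode is at most 0xff, because the AND mask clears all higher
   bits.  */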
/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
   It avoids exponential behavior in nonzero_bits1 when X has
   identical subexpressions on the first or the second level.  */

static unsigned HOST_WIDE_INT
cached_nonzero_bits (const_rtx x, enum machine_mode mode, const_rtx known_x,
		     enum machine_mode known_mode,
		     unsigned HOST_WIDE_INT known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     nonzero_bits1 on X with the subexpressions as KNOWN_X and the
     precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return nonzero_bits1 (x, mode, x1, mode,
			      cached_nonzero_bits (x1, mode, known_x,
						   known_mode, known_ret));

      if (ARITHMETIC_P (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return nonzero_bits1 (x, mode, x0, mode,
			      cached_nonzero_bits (x0, mode, known_x,
						   known_mode, known_ret));
    }

  return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
}
/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
   We don't let nonzero_bits recur into num_sign_bit_copies, because that
   is less useful.  We can't allow both, because that results in exponential
   run time recursion.  There is a nullstone testcase that triggered
   this.  This macro avoids accidental uses of num_sign_bit_copies.  */
#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior

/* Given an expression, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   an arithmetic operation, we can do better.  */

static unsigned HOST_WIDE_INT
nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
	       enum machine_mode known_mode,
	       unsigned HOST_WIDE_INT known_ret)
{
  unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
  unsigned HOST_WIDE_INT inner_nz;
  enum rtx_code code;
  enum machine_mode inner_mode;
  unsigned int mode_width = GET_MODE_PRECISION (mode);

  /* For floating-point and vector values, assume all bits are needed.  */
  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return nonzero;

  /* If X is wider than MODE, use its mode instead.  */
  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
    {
      mode = GET_MODE (x);
      nonzero = GET_MODE_MASK (mode);
      mode_width = GET_MODE_PRECISION (mode);
    }

  if (mode_width > HOST_BITS_PER_WIDE_INT)
    /* Our only callers in this case look for single bit values.  So
       just return the mode mask.  Those tests will then be false.  */
    return nonzero;

#ifndef WORD_REGISTER_OPERATIONS
  /* If MODE is wider than X, but both are a single word for both the host
     and target machines, we can compute this from which bits of the
     object might be nonzero in its own mode, taking into account the fact
     that on many CISC machines, accessing an object in a wider mode
     causes the high-order bits to become undefined.  So they are
     not known to be zero.  */

  if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
    {
      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
				      known_x, known_mode, known_ret);
      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
      return nonzero;
    }
#endif

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend unsigned and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be zero.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && REG_POINTER (x))
	nonzero &= GET_MODE_MASK (ptr_mode);
#endif

      /* Include declared information about alignment of pointers.  */
      /* ??? We don't properly preserve REG_POINTER changes across
	 pointer-to-integer casts, so we can't trust it except for
	 things that we know must be pointers.  See execute/960116-1.c.  */
      if ((x == stack_pointer_rtx
	   || x == frame_pointer_rtx
	   || x == arg_pointer_rtx)
	  && REGNO_POINTER_ALIGN (REGNO (x)))
	{
	  unsigned HOST_WIDE_INT alignment
	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;

#ifdef PUSH_ROUNDING
	  /* If PUSH_ROUNDING is defined, it is possible for the
	     stack to be momentarily aligned only to that amount,
	     so we pick the least alignment.  */
	  if (x == stack_pointer_rtx && PUSH_ARGS)
	    alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
			     alignment);
#endif

	  nonzero &= ~(alignment - 1);
	}

      {
	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
						  known_mode, known_ret,
						  &nonzero_for_hook);

	if (new_rtx)
	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
						   known_mode, known_ret);

	return nonzero_for_hook;
      }

    case CONST_INT:
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is negative in MODE, sign-extend the value.  */
      if (INTVAL (x) > 0
	  && mode_width < BITS_PER_WORD
	  && (UINTVAL (x) & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
	     != 0)
	return UINTVAL (x) | ((unsigned HOST_WIDE_INT) (-1) << mode_width);
#endif

      return UINTVAL (x);

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* In many, if not most, RISC machines, reading a byte from memory
	 zeros the rest of the register.  Noticing that fact saves a lot
	 of extra zero-extends.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
	nonzero &= GET_MODE_MASK (GET_MODE (x));
#endif
      break;

    case EQ:  case NE:
    case UNEQ:  case LTGT:
    case GT:  case GTU:  case UNGT:
    case LT:  case LTU:  case UNLT:
    case GE:  case GEU:  case UNGE:
    case LE:  case LEU:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If this produces an integer result, we know which bits are set.
	 Code here used to clear bits outside the mode of X, but that is
	 now done above.  */
      /* Mind that MODE is the mode the caller wants to look at this
	 operation in, and not the actual operation mode.  We can wind
	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
	 that describes the results of a vector compare.  */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
	  && mode_width <= HOST_BITS_PER_WIDE_INT)
	nonzero = STORE_FLAG_VALUE;
      break;

    case NEG:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif

      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
      break;

    case ABS:
#if 0
      /* Disabled to avoid exponential mutual recursion between nonzero_bits
	 and num_sign_bit_copies.  */
      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
	  == GET_MODE_PRECISION (GET_MODE (x)))
	nonzero = 1;
#endif
      break;

    case TRUNCATE:
      nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
				       known_x, known_mode, known_ret)
		  & GET_MODE_MASK (mode));
      break;

    case ZERO_EXTEND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
      break;

    case SIGN_EXTEND:
      /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
	 Otherwise, show all the bits in the outer mode but not the inner
	 may be nonzero.  */
      inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret);
      if (GET_MODE (XEXP (x, 0)) != VOIDmode)
	{
	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
	    inner_nz |= (GET_MODE_MASK (mode)
			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
	}

      nonzero &= inner_nz;
      break;

    case AND:
      nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
				      known_x, known_mode, known_ret)
		 & cached_nonzero_bits (XEXP (x, 1), mode,
					known_x, known_mode, known_ret);
      break;

    case XOR:   case IOR:
    case UMIN:  case UMAX:  case SMIN:  case SMAX:
      {
	unsigned HOST_WIDE_INT nonzero0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero0) != nonzero)
	  nonzero &= nonzero0
		     | cached_nonzero_bits (XEXP (x, 1), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    case PLUS:  case MINUS:
    case MULT:
    case DIV:   case UDIV:
    case MOD:   case UMOD:
      /* We can apply the rules of arithmetic to compute the number of
	 high- and low-order zero bits of these operations.  We start by
	 computing the width (position of the highest-order nonzero bit)
	 and the number of low-order zero bits for each value.  */
      {
	unsigned HOST_WIDE_INT nz0
	  = cached_nonzero_bits (XEXP (x, 0), mode,
				 known_x, known_mode, known_ret);
	unsigned HOST_WIDE_INT nz1
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);
	int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
	int width0 = floor_log2 (nz0) + 1;
	int width1 = floor_log2 (nz1) + 1;
	int low0 = floor_log2 (nz0 & -nz0);
	int low1 = floor_log2 (nz1 & -nz1);
	unsigned HOST_WIDE_INT op0_maybe_minusp
	  = nz0 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
	unsigned HOST_WIDE_INT op1_maybe_minusp
	  = nz1 & ((unsigned HOST_WIDE_INT) 1 << sign_index);
	unsigned int result_width = mode_width;
	int result_low = 0;

	switch (code)
	  {
	  case PLUS:
	    result_width = MAX (width0, width1) + 1;
	    result_low = MIN (low0, low1);
	    break;
	  case MINUS:
	    result_low = MIN (low0, low1);
	    break;
	  case MULT:
	    result_width = width0 + width1;
	    result_low = low0 + low1;
	    break;
	  case DIV:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = width0;
	    break;
	  case UDIV:
	    if (width1 == 0)
	      break;
	    result_width = width0;
	    break;
	  case MOD:
	    if (width1 == 0)
	      break;
	    if (!op0_maybe_minusp && !op1_maybe_minusp)
	      result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  case UMOD:
	    if (width1 == 0)
	      break;
	    result_width = MIN (width0, width1);
	    result_low = MIN (low0, low1);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (result_width < mode_width)
	  nonzero &= ((unsigned HOST_WIDE_INT) 1 << result_width) - 1;

	if (result_low > 0)
	  nonzero &= ~(((unsigned HOST_WIDE_INT) 1 << result_low) - 1);
      }
      break;

    case ZERO_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	nonzero &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
      break;

    case SUBREG:
      /* If this is a SUBREG formed for a promoted variable that has
	 been zero-extended, we know that at least the high-order bits
	 are zero, though others might be too.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
	nonzero = GET_MODE_MASK (GET_MODE (x))
		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
					 known_x, known_mode, known_ret);

      inner_mode = GET_MODE (SUBREG_REG (x));
      /* If the inner mode is a single word for both the host and target
	 machines, we can compute this from which bits of the inner
	 object might be nonzero.  */
      if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
	  && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
	{
	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
					  known_x, known_mode, known_ret);

#if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
	  /* If this is a typical RISC machine, we only have to worry
	     about the way loads are extended.  */
	  if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND
	       ? val_signbit_known_set_p (inner_mode, nonzero)
	       : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND)
	      || !MEM_P (SUBREG_REG (x)))
#endif
	    {
	      /* On many CISC machines, accessing an object in a wider mode
		 causes the high-order bits to become undefined.  So they are
		 not known to be zero.  */
	      if (GET_MODE_PRECISION (GET_MODE (x))
		  > GET_MODE_PRECISION (inner_mode))
		nonzero |= (GET_MODE_MASK (GET_MODE (x))
			    & ~GET_MODE_MASK (inner_mode));
	    }
	}
      break;

    case ASHIFTRT:
    case LSHIFTRT:
    case ASHIFT:
    case ROTATE:
      /* The nonzero bits are in two classes: any bits within MODE
	 that aren't in GET_MODE (x) are always significant.  The rest of the
	 nonzero bits are those that are significant in the operand of
	 the shift when shifted the appropriate number of bits.  This
	 shows that high-order bits are cleared by the right shift and
	 low-order bits by left shifts.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	{
	  enum machine_mode inner_mode = GET_MODE (x);
	  unsigned int width = GET_MODE_PRECISION (inner_mode);
	  int count = INTVAL (XEXP (x, 1));
	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
	  unsigned HOST_WIDE_INT op_nonzero
	    = cached_nonzero_bits (XEXP (x, 0), mode,
				   known_x, known_mode, known_ret);
	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
	  unsigned HOST_WIDE_INT outer = 0;

	  if (mode_width > width)
	    outer = (op_nonzero & nonzero & ~mode_mask);

	  if (code == LSHIFTRT)
	    inner >>= count;
	  else if (code == ASHIFTRT)
	    {
	      inner >>= count;

	      /* If the sign bit may have been nonzero before the shift, we
		 need to mark all the places it could have been copied to
		 by the shift as possibly nonzero.  */
	      if (inner & ((unsigned HOST_WIDE_INT) 1 << (width - 1 - count)))
		inner |= (((unsigned HOST_WIDE_INT) 1 << count) - 1)
			 << (width - count);
	    }
	  else if (code == ASHIFT)
	    inner <<= count;
	  else
	    inner = ((inner << (count % width)
		      | (inner >> (width - (count % width)))) & mode_mask);

	  nonzero &= (outer | inner);
	}
      break;

    case FFS:
    case POPCOUNT:
      /* This is at most the number of bits in the mode.  */
      nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
      break;

    case CLZ:
      /* If CLZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CTZ:
      /* If CTZ has a known value at zero, then the nonzero bits are
	 that value, plus the number of bits in the mode minus one.  */
      if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
	nonzero
	  |= ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      else
	nonzero = -1;
      break;

    case CLRSB:
      /* This is at most the number of bits in the mode minus 1.  */
      nonzero = ((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
      break;

    case PARITY:
      nonzero = 1;
      break;

    case IF_THEN_ELSE:
      {
	unsigned HOST_WIDE_INT nonzero_true
	  = cached_nonzero_bits (XEXP (x, 1), mode,
				 known_x, known_mode, known_ret);

	/* Don't call nonzero_bits for the second time if it cannot change
	   anything.  */
	if ((nonzero & nonzero_true) != nonzero)
	  nonzero &= nonzero_true
		     | cached_nonzero_bits (XEXP (x, 2), mode,
					    known_x, known_mode, known_ret);
      }
      break;

    default:
      break;
    }

  return nonzero;
}
/* See the macro definition above.  */
#undef cached_num_sign_bit_copies

/* The function cached_num_sign_bit_copies is a wrapper around
   num_sign_bit_copies1.  It avoids exponential behavior in
   num_sign_bit_copies1 when X has identical subexpressions on the
   first or the second level.  */

static unsigned int
cached_num_sign_bit_copies (const_rtx x, enum machine_mode mode, const_rtx known_x,
			    enum machine_mode known_mode,
			    unsigned int known_ret)
{
  if (x == known_x && mode == known_mode)
    return known_ret;

  /* Try to find identical subexpressions.  If found call
     num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
     the precomputed value for the subexpression as KNOWN_RET.  */

  if (ARITHMETIC_P (x))
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      /* Check the first level.  */
      if (x0 == x1)
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));

      /* Check the second level.  */
      if (ARITHMETIC_P (x0)
	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x1, mode,
				cached_num_sign_bit_copies (x1, mode, known_x,
							    known_mode,
							    known_ret));

      if (ARITHMETIC_P (x1)
	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
	return
	  num_sign_bit_copies1 (x, mode, x0, mode,
				cached_num_sign_bit_copies (x0, mode, known_x,
							    known_mode,
							    known_ret));
    }

  return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
}
/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value will always
   be between 1 and the number of bits in MODE.  */

static unsigned int
num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x,
		      enum machine_mode known_mode,
		      unsigned int known_ret)
{
  enum rtx_code code = GET_CODE (x);
  unsigned int bitwidth = GET_MODE_PRECISION (mode);
  int num0, num1, result;
  unsigned HOST_WIDE_INT nonzero;

  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */

  if (mode == VOIDmode)
    mode = GET_MODE (x);

  if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x))
      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
    return 1;

  /* For a smaller object, just ignore the high bits.  */
  if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
    {
      num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
					 known_x, known_mode, known_ret);
      return MAX (1,
		  num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
    }

  if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
    {
#ifndef WORD_REGISTER_OPERATIONS
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  */
      return 1;
#else
      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
	  && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
	  )
	return 1;
#endif
    }

  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
	  && mode == Pmode && REG_POINTER (x))
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
							 known_mode, known_ret,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
#ifdef LOAD_EXTEND_OP
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth
			- (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
#endif
      break;

    case CONST_INT:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth
		      - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
		      num0);
	}

      /* For a smaller object, just ignore the high bits.  */
      if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
					     known_x, known_mode, known_ret);
	  return MAX (1, (num0
			  - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
				   - bitwidth)));
	}

#ifdef WORD_REGISTER_OPERATIONS
#ifdef LOAD_EXTEND_OP
      /* For paradoxical SUBREGs on machines where all register operations
	 affect the entire register, just look inside.  Note that we are
	 passing MODE to the recursive call, so the number of sign bit copies
	 will remain relative to that mode, not the inner mode.  */

      /* This works only if loads sign extend.  Otherwise, if we get a
	 reload for the inner part, it may be loaded from the stack, and
	 then we lose all sign bit copies that existed before the store
	 to the stack.  */

      if (paradoxical_subreg_p (x)
	  && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
	  && MEM_P (SUBREG_REG (x)))
	return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					   known_x, known_mode, known_ret);
#endif
#endif
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
	      + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					    known_x, known_mode, known_ret));

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);

    case ROTATE:       case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && (((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if ((((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* If the constant is negative, take its 1's complement and remask.
	 Then see how many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, always return BITWIDTH.  */

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & ((unsigned HOST_WIDE_INT) 1 << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}
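
/* Illustration only (not part of the original source): for the
   CONST_INT case above, x == (const_int -4) in SImode gives
   nonzero == 0xfffffffc; the sign bit is set, so it is complemented
   to 0x3.  floor_log2 (0x3) is 1, and the result is 32 - 1 - 1 = 30
   sign-bit copies, matching the binary pattern 0b111...100 whose top
   30 bits all equal the sign bit.  */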
/* Calculate the rtx_cost of a single instruction.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
insn_rtx_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.
     We can't use single_set since we only have the pattern.  */
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (set)
		return 0;
	      set = x;
	    }
	}
      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}
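
/* Illustration only (not part of the original source): if-conversion
   style users compare the cost of an instruction against a branch
   cost budget, e.g.

     if (insn_rtx_cost (PATTERN (insn), optimize_bb_for_speed_p (bb))
	 > COSTS_N_INSNS (max_insns))
       ...  too expensive to speculate  ...

   A zero return means the pattern had no recognizable single set, so
   callers should treat it as "cost unknown" rather than "free".  */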
/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonizing it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
                        rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  enum machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;
  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
          || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
         && op1 == CONST0_RTX (GET_MODE (op0))
         && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

#ifdef HAVE_cc0
      /* If comparison with cc0, import actual comparison from compare
         insn.  */
      if (op0 == cc0_rtx)
        {
          if ((prev = prev_nonnote_insn (prev)) == 0
              || !NONJUMP_INSN_P (prev)
              || (set = single_set (prev)) == 0
              || SET_DEST (set) != cc0_rtx)
            return 0;

          op0 = SET_SRC (set);
          op1 = CONST0_RTX (GET_MODE (op0));
          if (earliest)
            *earliest = prev;
        }
#endif
      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
        {
          op1 = XEXP (op0, 1);
          op0 = XEXP (op0, 0);
          continue;
        }
      else if (!REG_P (op0))
        break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
         stop if it isn't a single set or if it has a REG_INC note because
         we don't want to bother dealing with it.  */
      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
          || !NONJUMP_INSN_P (prev)
          || FIND_REG_INC_NOTE (prev, NULL_RTX)
          /* In cfglayout mode, there do not have to be labels at the
             beginning of a block, or jumps at the end, so the previous
             conditions would not stop us when we reach bb boundary.  */
          || BLOCK_FOR_INSN (prev) != bb)
        break;

      set = set_of (op0, prev);

      if (set
          && (GET_CODE (set) != SET
              || !rtx_equal_p (SET_DEST (set), op0)))
        break;
      /* If this is setting OP0, get what it sets it to if it looks like
         a comparison.  */
      if (set)
        {
          enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
          REAL_VALUE_TYPE fsfv;
#endif

          /* ??? We may not combine comparisons done in a CCmode with
             comparisons not done in a CCmode.  This is to aid targets
             like Alpha that have an IEEE compliant EQ instruction, and
             a non-IEEE compliant BEQ instruction.  The use of CCmode is
             actually artificial, simply to prevent the combination, but
             should not affect other platforms.

             However, we must allow VOIDmode comparisons to match either
             CCmode or non-CCmode comparison, because some ports have
             modeless comparisons inside branch patterns.

             ??? This mode check should perhaps look more like the mode check
             in simplify_comparison in combine.  */
          if ((GET_CODE (SET_SRC (set)) == COMPARE
               || (((code == NE
                     || (code == LT
                         && val_signbit_known_set_p (inner_mode,
                                                     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == LT
                         && SCALAR_FLOAT_MODE_P (inner_mode)
                         && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                             REAL_VALUE_NEGATIVE (fsfv)))
#endif
                     ))
                   && COMPARISON_P (SET_SRC (set))))
              && (((GET_MODE_CLASS (mode) == MODE_CC)
                   == (GET_MODE_CLASS (inner_mode) == MODE_CC))
                  || mode == VOIDmode || inner_mode == VOIDmode))
            x = SET_SRC (set);
          else if (((code == EQ
                     || (code == GE
                         && val_signbit_known_set_p (inner_mode,
                                                     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
                     || (code == GE
                         && SCALAR_FLOAT_MODE_P (inner_mode)
                         && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
                             REAL_VALUE_NEGATIVE (fsfv)))
#endif
                     ))
                    && COMPARISON_P (SET_SRC (set))
                    && (((GET_MODE_CLASS (mode) == MODE_CC)
                         == (GET_MODE_CLASS (inner_mode) == MODE_CC))
                        || mode == VOIDmode || inner_mode == VOIDmode))
            {
              reverse_code = 1;
              x = SET_SRC (set);
            }
          else
            break;
        }
      else if (reg_set_p (op0, prev))
        {
          /* If this sets OP0, but not directly, we have to give up.  */
          break;
        }

      if (x)
        {
          /* If the caller is expecting the condition to be valid at INSN,
             make sure X doesn't change before INSN.  */
          if (valid_at_insn_p)
            if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
              break;
          if (COMPARISON_P (x))
            code = GET_CODE (x);
          if (reverse_code)
            {
              code = reversed_comparison_code (x, prev);
              if (code == UNKNOWN)
                return 0;
              reverse_code = 0;
            }

          op0 = XEXP (x, 0), op1 = XEXP (x, 1);
          if (earliest)
            *earliest = prev;
        }
    }
  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;
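  /* For instance, (gt (const_int 4) (reg:SI 60)) is rewritten above as
     (lt (reg:SI 60) (const_int 4)): swap_condition turns GT into LT when
     the operands trade places.  The register number is made up.  */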
  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
      && CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
        = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));

      switch (code)
        {
        case LE:
          if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
            code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
          break;

        /* When cross-compiling, const_val might be sign-extended from
           BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
        case GE:
          if ((const_val & max_val)
              != ((unsigned HOST_WIDE_INT) 1
                  << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
            code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
          break;

        case LEU:
          if (uconst_val < max_val)
            code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
          break;

        case GEU:
          if (uconst_val != 0)
            code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
          break;

        default:
          break;
        }
    }
#ifdef HAVE_cc0
  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;
#endif

  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
}
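/* Worked example of rule (4) above: (le:SI (reg:SI 60) (const_int 3))
   comes back as (lt:SI (reg:SI 60) (const_int 4)), and
   (geu:SI (reg:SI 60) (const_int 1)) as (gtu:SI (reg:SI 60) (const_int 0)).
   The register number is made up.  */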
/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */
rtx
get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
                                 allow_cc_mode, valid_at_insn_p);
}
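/* Hypothetical caller, for illustration only:

     rtx earliest, cond;
     cond = get_condition (jump, &earliest, 0, 1);
     if (cond != 0)
       ... test GET_CODE (cond), XEXP (cond, 0) and XEXP (cond, 1) ...

   get_condition and the accessors are real; the surrounding control
   flow is invented.  */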
/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */
static void
init_num_sign_bit_copies_in_rep (void)
{
  enum machine_mode mode, in_mode;

  for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
       in_mode = GET_MODE_WIDER_MODE (mode))
    for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
         mode = GET_MODE_WIDER_MODE (mode))
      {
        enum machine_mode i;

        /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
           extends to the next widest mode.  */
        gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
                    || GET_MODE_WIDER_MODE (mode) == in_mode);

        /* We are in in_mode.  Count how many bits outside of mode
           have to be copies of the sign-bit.  */
        for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
          {
            enum machine_mode wider = GET_MODE_WIDER_MODE (i);

            if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
                /* We can only check sign-bit copies starting from the
                   top-bit.  In order to be able to check the bits we
                   have already seen we pretend that subsequent bits
                   have to be sign-bit copies too.  */
                || num_sign_bit_copies_in_rep [in_mode][mode])
              num_sign_bit_copies_in_rep [in_mode][mode]
                += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
          }
      }
}
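/* For example, on a target whose hook reports
   mode_rep_extended (SImode, DImode) == SIGN_EXTEND (a MIPS64-style
   convention, named here only as an illustration), the entry
   num_sign_bit_copies_in_rep[DImode][SImode] becomes 64 - 32 = 32:
   every bit of the DImode value above the SImode portion must be a copy
   of the SImode sign bit.  */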
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
          >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
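/* Continuing the example above: truncating a DImode X to SImode is
   safe when X is known to have at least 32 + 1 = 33 sign-bit copies --
   the 32 bits outside SImode plus the SImode sign bit itself.  */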
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx.  */

void
init_rtlanal (void)
{
  int i;

  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
    }

  init_num_sign_bit_copies_in_rep ();
}
/* Check whether this is a constant pool constant.  */

bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return GET_CODE (x) == CONST_DOUBLE;
}
/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
        return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
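/* For example, m == 0xff gives exact_log2 (0x100) == 8, an 8-bit field,
   while m == 0xf0 gives exact_log2 (0xf1) == -1 because the selected
   bits do not start at bit 0.  */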