/* Analyze RTL for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software
   Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Forward declarations */
static void set_of_1 (rtx, rtx, void *);
static bool covers_regno_p (rtx, unsigned int);
static bool covers_regno_no_parallel_p (rtx, unsigned int);
static int rtx_referenced_p_1 (rtx *, void *);
static int computed_jump_p_1 (rtx);
static void parms_set (rtx, rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (rtx, enum machine_mode,
                                                   rtx, enum machine_mode,
                                                   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (rtx, enum machine_mode, rtx,
                                             enum machine_mode,
                                             unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (rtx, enum machine_mode, rtx,
                                                enum machine_mode,
                                                unsigned int);
static unsigned int num_sign_bit_copies1 (rtx, enum machine_mode, rtx,
                                          enum machine_mode, unsigned int);
/* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
   -1 if a code has no such operand.  */
static int non_rtx_starting_operands[NUM_RTX_CODE];

/* Bit flags that specify the machine subtype we are compiling for.
   Bits are tested using macros TARGET_... defined in the tm.h file
   and set by `-m...' switches.  Must be defined in rtlanal.c.  */

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

rtx_unstable_p (rtx x)
  RTX_CODE code = GET_CODE (x);

      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))

#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
      /* ??? When call-clobbered, the value is stable modulo the restore
         that must happen after a call.  This currently screws up local-alloc
         into believing that the restore is not needed.  */
      if (x == pic_offset_table_rtx)

      if (MEM_VOLATILE_P (x))

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (rtx_unstable_p (XEXP (x, i)))
      else if (fmt[i] == 'E')
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_unstable_p (XVECEXP (x, i, j)))
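/* Illustrative sketch (not part of the original file): a pass that wants to
   remember a value keyed by its address could use rtx_unstable_p to reject
   addresses that may change within the function.  The helper name below is
   hypothetical.  */

static int
example_address_is_stable_p (rtx mem)
{
  /* A MEM whose address is stable can be keyed reliably within one
     function.  */
  return MEM_P (mem) && !rtx_unstable_p (XEXP (mem, 0));
}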
/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

rtx_varies_p (rtx x, int for_alias)

      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

      /* Note that we have to test for the actual rtx used for the frame
         and arg pointers and not just the register number in case we have
         eliminated the frame and/or arg pointer and are using it
         for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))

      if (x == pic_offset_table_rtx
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
          /* ??? When call-clobbered, the value is stable modulo the restore
             that must happen after a call.  This currently screws up
             local-alloc into believing that the restore is not needed, so we
             must return 0 only if we are called from alias analysis.  */

      /* The operand 0 of a LO_SUM is considered constant
         (in fact it is related specifically to operand 1)
         during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
             || rtx_varies_p (XEXP (x, 1), for_alias);

      if (MEM_VOLATILE_P (x))

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (rtx_varies_p (XEXP (x, i), for_alias))
      else if (fmt[i] == 'E')
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
/* Return nonzero if the use of X as an address in a MEM can cause a trap.
   MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
   whether nonzero is returned for unaligned memory accesses on strict
   alignment machines.  */

rtx_addr_can_trap_p_1 (rtx x, enum machine_mode mode, bool unaligned_mems)
  enum rtx_code code = GET_CODE (x);

      return SYMBOL_REF_WEAK (x);

      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          /* The arg pointer varies if it is not a fixed register.  */
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)

      return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);

      /* An address is assumed not to trap if:
         - it is an address that can't trap plus a constant integer,
           with the proper remainder modulo the mode size if we are
           considering unaligned memory references.  */
      if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems)
          && GET_CODE (XEXP (x, 1)) == CONST_INT)
          HOST_WIDE_INT offset;

          if (!STRICT_ALIGNMENT
              || GET_MODE_SIZE (mode) == 0)

          offset = INTVAL (XEXP (x, 1));

#ifdef SPARC_STACK_BOUNDARY_HACK
          /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
             the real alignment of %sp.  However, when it does this, the
             alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
          if (SPARC_STACK_BOUNDARY_HACK
              && (XEXP (x, 0) == stack_pointer_rtx
                  || XEXP (x, 0) == hard_frame_pointer_rtx))
            offset -= STACK_POINTER_OFFSET;

          return offset % GET_MODE_SIZE (mode) != 0;

      /* - or it is the pic register plus a constant.  */
      if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))

      return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems);

      return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);

  /* If it isn't one of the cases above, it can cause a trap.  */
/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

rtx_addr_can_trap_p (rtx x)
  return rtx_addr_can_trap_p_1 (x, VOIDmode, false);
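/* Illustrative sketch (not part of the original file): a caller deciding
   whether a load may be speculated might combine rtx_addr_can_trap_p with
   the MEM predicates above.  The helper name and the policy are assumptions
   made purely for illustration.  */

static int
example_mem_safe_to_speculate_p (rtx mem)
{
  /* In this sketch only a non-volatile MEM with a provably safe address
     qualifies.  */
  return MEM_P (mem)
         && !MEM_VOLATILE_P (mem)
         && !rtx_addr_can_trap_p (XEXP (mem, 0));
}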
/* Return true if X is an address that is known to not be zero.  */

nonzero_address_p (rtx x)
  enum rtx_code code = GET_CODE (x);

      return !SYMBOL_REF_WEAK (x);

      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
          || x == stack_pointer_rtx
          || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
          && REGNO (x) <= LAST_VIRTUAL_REGISTER)

      return nonzero_address_p (XEXP (x, 0));

      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
        return nonzero_address_p (XEXP (x, 0));
      /* Handle PIC references.  */
      else if (XEXP (x, 0) == pic_offset_table_rtx
               && CONSTANT_P (XEXP (x, 1)))

      /* Similar to the above; allow positive offsets.  Further, since
         auto-inc is only allowed in memories, the register must be a
         pointer.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) > 0)
        return nonzero_address_p (XEXP (x, 0));

      /* Similarly.  Further, the offset is always positive.  */
      return nonzero_address_p (XEXP (x, 0));

      return nonzero_address_p (XEXP (x, 1));

  /* If it isn't one of the cases above, might be zero.  */
/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

rtx_addr_varies_p (rtx x, int for_alias)

    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (rtx_addr_varies_p (XEXP (x, i), for_alias))
      else if (fmt[i] == 'E')
          for (j = 0; j < XVECLEN (x, i); j++)
            if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

get_integer_term (rtx x)
  if (GET_CODE (x) == CONST)

  if (GET_CODE (x) == MINUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return INTVAL (XEXP (x, 1));

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

get_related_value (rtx x)
  if (GET_CODE (x) != CONST)

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
  else if (GET_CODE (x) == MINUS
           && GET_CODE (XEXP (x, 1)) == CONST_INT)
/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

count_occurrences (rtx x, rtx find, int count_dest)
  const char *format_ptr;

      if (MEM_P (find) && rtx_equal_p (x, find))

      if (SET_DEST (x) == find && ! count_dest)
        return count_occurrences (SET_SRC (x), find, count_dest);

  format_ptr = GET_RTX_FORMAT (code);

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
      switch (*format_ptr++)
          count += count_occurrences (XEXP (x, i), find, count_dest);

          for (j = 0; j < XVECLEN (x, i); j++)
            count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
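/* Illustrative sketch (hypothetical helper, not original code): a transform
   that rewrites a register might first check with count_occurrences that the
   register appears exactly once in the insn pattern.  */

static int
example_reg_used_exactly_once_p (rtx insn, rtx reg)
{
  /* Count REG everywhere in the pattern, including SET destinations.  */
  return count_occurrences (PATTERN (insn), reg, 1) == 1;
}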
/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

reg_mentioned_p (rtx reg, rtx in)

  if (GET_CODE (in) == LABEL_REF)
    return reg == XEXP (in, 0);

  code = GET_CODE (in);

      /* Compare registers by number.  */
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
         and are unique.  */

      /* These are kept unique for a given value.  */

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          for (j = XVECLEN (in, i) - 1; j >= 0; j--)
            if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
      else if (fmt[i] == 'e'
               && reg_mentioned_p (reg, XEXP (in, i)))
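/* Illustrative sketch (assumed helper, not original code): reg_mentioned_p is
   the usual way to skip insns that do not touch a given register at all.  */

static int
example_insn_mentions_reg_p (rtx insn, rtx reg)
{
  return INSN_P (insn) && reg_mentioned_p (reg, PATTERN (insn));
}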
/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

no_labels_between_p (rtx beg, rtx end)
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

reg_used_between_p (rtx reg, rtx from_insn, rtx to_insn)

  if (from_insn == to_insn)

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
        && (reg_overlap_mentioned_p (reg, PATTERN (insn))
            || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

reg_referenced_p (rtx x, rtx body)

  switch (GET_CODE (body))
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
         of a REG that occupies all of the REG, the insn references X if
         it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
          && GET_CODE (SET_DEST (body)) != PC
          && !REG_P (SET_DEST (body))
          && ! (GET_CODE (SET_DEST (body)) == SUBREG
                && REG_P (SUBREG_REG (SET_DEST (body)))
                && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
                      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
                    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
                         + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
          && reg_overlap_mentioned_p (x, SET_DEST (body)))

      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))

      return reg_overlap_mentioned_p (x, body);

      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))

      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        if (reg_referenced_p (x, XVECEXP (body, 0, i)))

      if (MEM_P (XEXP (body, 0)))
        if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))

      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
      return reg_referenced_p (x, COND_EXEC_CODE (body));
/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

reg_set_between_p (rtx reg, rtx from_insn, rtx to_insn)

  if (from_insn == to_insn)

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))

/* Internals of reg_set_between_p.  */

reg_set_p (rtx reg, rtx insn)
  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
      && (FIND_REG_INC_NOTE (insn, reg)
                   && REGNO (reg) < FIRST_PSEUDO_REGISTER
                   && TEST_HARD_REG_BIT (regs_invalidated_by_call,
                  || find_reg_fusage (insn, CLOBBER, reg)))))

  return set_of (reg, insn) != NULL_RTX;
/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

modified_between_p (rtx x, rtx start, rtx end)
  enum rtx_code code = GET_CODE (x);

      if (modified_between_p (XEXP (x, 0), start, end))
      if (MEM_READONLY_P (x))
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
        if (memory_modified_in_insn_p (x, insn))

      return reg_set_between_p (x, start, end);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_between_p (XVECEXP (x, i, j), start, end))
/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

modified_in_p (rtx x, rtx insn)
  enum rtx_code code = GET_CODE (x);

      if (modified_in_p (XEXP (x, 0), insn))
      if (MEM_READONLY_P (x))
      if (memory_modified_in_insn_p (x, insn))

      return reg_set_p (x, insn);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if (modified_in_p (XVECEXP (x, i, j), insn))
/* Helper function for set_of.  */

set_of_1 (rtx x, rtx pat, void *data1)
  struct set_of_data *data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */

set_of (rtx pat, rtx insn)
  struct set_of_data data;
  data.found = NULL_RTX;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

single_set_2 (rtx insn, rtx pat)
  int set_verified = 1;

  if (GET_CODE (pat) == PARALLEL)
      for (i = 0; i < XVECLEN (pat, 0); i++)
          rtx sub = XVECEXP (pat, 0, i);
          switch (GET_CODE (sub))
              /* We can consider insns having multiple sets, where all
                 but one are dead, as single set insns.  In the common case
                 only a single set is present in the pattern, so we want
                 to avoid checking for REG_UNUSED notes unless necessary.

                 When we reach a set the first time, we just expect it is
                 the single set we are looking for and only when more
                 sets are found in the insn, we check them.  */
              if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
                  && !side_effects_p (set))

                set = sub, set_verified = 0;
              else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
                       || side_effects_p (sub))
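/* Illustrative sketch (hypothetical helper, not original code): single_set is
   the usual entry point to the logic above; callers test its result before
   treating an insn as a simple assignment.  */

static rtx
example_simple_move_source (rtx insn)
{
  rtx set = single_set (insn);

  /* Only accept insns whose sole effect is a register-to-register copy.  */
  if (set && REG_P (SET_DEST (set)) && REG_P (SET_SRC (set)))
    return SET_SRC (set);
  return NULL_RTX;
}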
/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

multiple_sets (rtx insn)
  /* INSN must be an insn.  */

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
            /* If we have already found a SET, then return now.  */

  /* Either zero or one SET.  */
/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
           && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
           && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
      if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);

  return (REG_P (src) && REG_P (dst)
          && REGNO (src) == REGNO (dst));
/* Return nonzero if an insn consists only of SETs, each of which only sets a
   value to itself.  */

noop_move_p (rtx insn)
  rtx pat = PATTERN (insn);

  if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)

  /* Insns carrying these notes are useful later on.  */
  if (find_reg_note (insn, REG_EQUAL, NULL_RTX))

  /* For now treat an insn with a REG_RETVAL note as a
     special insn which should not be considered a no-op.  */
  if (find_reg_note (insn, REG_RETVAL, NULL_RTX))

  if (GET_CODE (pat) == SET && set_noop_p (pat))

  if (GET_CODE (pat) == PARALLEL)
      /* If nothing but SETs of registers to themselves,
         this insn can also be deleted.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
          rtx tem = XVECEXP (pat, 0, i);

          if (GET_CODE (tem) == USE
              || GET_CODE (tem) == CLOBBER)

          if (GET_CODE (tem) != SET || ! set_noop_p (tem))
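/* Illustrative sketch (assumed helper, not original code): a cleanup pass
   might walk the insn chain and count the no-op moves that noop_move_p
   identifies before deleting them.  */

static int
example_count_noop_moves (rtx first)
{
  int count = 0;
  rtx insn;

  for (insn = first; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && noop_move_p (insn))
      count++;
  return count;
}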
/* Return the last thing that X was assigned from before *PINSN.  If VALID_TO
   is not NULL_RTX then verify that the object is not modified up to VALID_TO.
   If the object was modified, if we hit a partial assignment to X, or hit a
   CODE_LABEL first, return X.  If we found an assignment, update *PINSN to
   point to it.  ALLOW_HWREG is set to 1 if hardware registers are allowed to
   be the src.  */

find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)

  for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
      rtx set = single_set (p);
      rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);

      if (set && rtx_equal_p (x, SET_DEST (set)))
          rtx src = SET_SRC (set);

          if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
            src = XEXP (note, 0);

          if ((valid_to == NULL_RTX
               || ! modified_between_p (src, PREV_INSN (p), valid_to))
              /* Reject hard registers because we don't usually want
                 to use them; we'd rather use a pseudo.  */
                     && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))

      /* If set in non-simple way, we don't have a value.  */
      if (reg_set_p (x, p))
/* Return nonzero if register in range [REGNO, ENDREGNO)
   appears either explicitly or implicitly in X
   other than being stored into.

   References contained within the substructure at LOC do not count.
   LOC may be zero, meaning don't ignore anything.  */

refers_to_regno_p (unsigned int regno, unsigned int endregno, rtx x,
                   rtx *loc)
  unsigned int x_regno;

  /* The contents of a REG_NONNEG note is always zero, so we must come here
     upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */

  code = GET_CODE (x);

      x_regno = REGNO (x);

      /* If we are modifying the stack, frame, or argument pointer, it will
         clobber a virtual register.  In fact, we could be more precise,
         but it isn't worth it.  */
      if ((x_regno == STACK_POINTER_REGNUM
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
           || x_regno == ARG_POINTER_REGNUM
           || x_regno == FRAME_POINTER_REGNUM)
          && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)

      return (endregno > x_regno
              && regno < x_regno + (x_regno < FIRST_PSEUDO_REGISTER
                                    ? hard_regno_nregs[x_regno][GET_MODE (x)]
                                    : 1));

      /* If this is a SUBREG of a hard reg, we can see exactly which
         registers are being modified.  Otherwise, handle normally.  */
      if (REG_P (SUBREG_REG (x))
          && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
          unsigned int inner_regno = subreg_regno (x);
          unsigned int inner_endregno
            = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
                             ? hard_regno_nregs[inner_regno][GET_MODE (x)] : 1);

          return endregno > inner_regno && regno < inner_endregno;

      if (&SET_DEST (x) != loc
          /* Note setting a SUBREG counts as referring to the REG it is in for
             a pseudo but not for hard registers since we can
             treat each word individually.  */
          && ((GET_CODE (SET_DEST (x)) == SUBREG
               && loc != &SUBREG_REG (SET_DEST (x))
               && REG_P (SUBREG_REG (SET_DEST (x)))
               && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
               && refers_to_regno_p (regno, endregno,
                                     SUBREG_REG (SET_DEST (x)), loc))
              || (!REG_P (SET_DEST (x))
                  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))

      if (code == CLOBBER || loc == &SET_SRC (x))

  /* X does not match, so try its subexpressions.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (fmt[i] == 'e' && loc != &XEXP (x, i))
          if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
      else if (fmt[i] == 'E')
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (loc != &XVECEXP (x, i, j)
                && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
/* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
   we check if any register number in X conflicts with the relevant register
   numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
   contains a MEM (we don't bother checking for memory addresses that can't
   conflict because we expect this to be a rare case.  */

reg_overlap_mentioned_p (rtx x, rtx in)
  unsigned int regno, endregno;

  /* If either argument is a constant, then modifying X can not
     affect IN.  Here we look at IN, we can profitably combine
     CONSTANT_P (x) with the switch statement below.  */
  if (CONSTANT_P (in))

  switch (GET_CODE (x))
    case STRICT_LOW_PART:
      /* Overly conservative.  */

      regno = REGNO (SUBREG_REG (x));
      if (regno < FIRST_PSEUDO_REGISTER)
        regno = subreg_regno (x);

      endregno = regno + (regno < FIRST_PSEUDO_REGISTER
                          ? hard_regno_nregs[regno][GET_MODE (x)] : 1);
      return refers_to_regno_p (regno, endregno, in, (rtx *) 0);

        fmt = GET_RTX_FORMAT (GET_CODE (in));
        for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
            if (reg_overlap_mentioned_p (x, XEXP (in, i)))
            else if (fmt[i] == 'E')
                for (j = XVECLEN (in, i) - 1; j >= 0; --j)
                  if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))

      return reg_mentioned_p (x, in);

        /* If any register in here refers to it we return true.  */
        for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          if (XEXP (XVECEXP (x, 0, i), 0) != 0
              && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))

      gcc_assert (CONSTANT_P (x));
/* Call FUN on each register or MEM that is stored into or clobbered by X.
   (X would be the pattern of an insn).
   FUN receives two arguments:
     the REG, MEM, CC0 or PC being stored in or clobbered,
     the SET or CLOBBER rtx that does the store.

  If the item being stored in or clobbered is a SUBREG of a hard register,
  the SUBREG will be passed.  */

note_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data)

  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);

  if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
      rtx dest = SET_DEST (x);

      while ((GET_CODE (dest) == SUBREG
              && (!REG_P (SUBREG_REG (dest))
                  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
             || GET_CODE (dest) == ZERO_EXTRACT
             || GET_CODE (dest) == STRICT_LOW_PART)
        dest = XEXP (dest, 0);

      /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
         each of whose first operand is a register.  */
      if (GET_CODE (dest) == PARALLEL)
          for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
            if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
              (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);

        (*fun) (dest, x, data);

  else if (GET_CODE (x) == PARALLEL)
    for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
      note_stores (XVECEXP (x, 0, i), fun, data);
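/* Illustrative sketch (hypothetical callback, not original code): note_stores
   invokes FUN once per stored location, so a typical callback records, for
   example, the first hard register of each REG that a pattern writes.  */

static void
example_record_hard_reg_store (rtx dest, rtx setter ATTRIBUTE_UNUSED,
                               void *data)
{
  HARD_REG_SET *stored = (HARD_REG_SET *) data;

  if (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    SET_HARD_REG_BIT (*stored, REGNO (dest));
}

/* A caller would use it as:
     note_stores (PATTERN (insn), example_record_hard_reg_store, &stored);  */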
/* Like note_stores, but call FUN for each expression that is being
   referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
   FUN for each expression, not any interior subexpressions.  FUN receives a
   pointer to the expression and the DATA passed to this function.

   Note that this is not quite the same test as that done in reg_referenced_p
   since that considers something as being referenced if it is being
   partially set, while we do not.  */

note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)

  switch (GET_CODE (body))
      (*fun) (&COND_EXEC_TEST (body), data);
      note_uses (&COND_EXEC_CODE (body), fun, data);

      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        note_uses (&XVECEXP (body, 0, i), fun, data);

      (*fun) (&XEXP (body, 0), data);

      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
        (*fun) (&ASM_OPERANDS_INPUT (body, i), data);

      (*fun) (&TRAP_CONDITION (body), data);

      (*fun) (&XEXP (body, 0), data);

    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
        (*fun) (&XVECEXP (body, 0, i), data);

      if (MEM_P (XEXP (body, 0)))
        (*fun) (&XEXP (XEXP (body, 0), 0), data);

        rtx dest = SET_DEST (body);

        /* For sets we replace everything in source plus registers in memory
           expression in store and operands of a ZERO_EXTRACT.  */
        (*fun) (&SET_SRC (body), data);

        if (GET_CODE (dest) == ZERO_EXTRACT)
            (*fun) (&XEXP (dest, 1), data);
            (*fun) (&XEXP (dest, 2), data);

        while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
          dest = XEXP (dest, 0);

          (*fun) (&XEXP (dest, 0), data);

      /* All the other possibilities never store.  */
      (*fun) (pbody, data);
/* Return nonzero if X's old contents don't survive after INSN.
   This will be true if X is (cc0) or if X is a register and
   X dies in INSN or because INSN entirely sets X.

   "Entirely set" means set directly and not through a SUBREG, or
   ZERO_EXTRACT, so no trace of the old contents remains.
   Likewise, REG_INC does not count.

   REG may be a hard or pseudo reg.  Renumbering is not taken into account,
   but for this use that makes no difference, since regs don't overlap
   during their lifetimes.  Therefore, this function may be used
   at any time after deaths have been computed (in flow.c).

   If REG is a hard reg that occupies multiple machine registers, this
   function will only return 1 if each of those registers will be replaced
   by INSN.  */

dead_or_set_p (rtx insn, rtx x)
  unsigned int regno, last_regno;

  /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
  if (GET_CODE (x) == CC0)

  gcc_assert (REG_P (x));

  last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno
                : regno + hard_regno_nregs[regno][GET_MODE (x)] - 1);
  for (i = regno; i <= last_regno; i++)
    if (! dead_or_set_regno_p (insn, i))
/* Return TRUE iff DEST is a register or subreg of a register and
   doesn't change the number of words of the inner register, and any
   part of the register is TEST_REGNO.  */

covers_regno_no_parallel_p (rtx dest, unsigned int test_regno)
  unsigned int regno, endregno;

  if (GET_CODE (dest) == SUBREG
      && (((GET_MODE_SIZE (GET_MODE (dest))
            + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
          == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
               + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
    dest = SUBREG_REG (dest);

  regno = REGNO (dest);
  endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
              : regno + hard_regno_nregs[regno][GET_MODE (dest)]);
  return (test_regno >= regno && test_regno < endregno);
/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
   any member matches the covers_regno_no_parallel_p criteria.  */

covers_regno_p (rtx dest, unsigned int test_regno)
  if (GET_CODE (dest) == PARALLEL)
      /* Some targets place small structures in registers for return
         values of functions, and those registers are wrapped in
         PARALLELs that we may see as the destination of a SET.  */

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
          rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
          if (inner != NULL_RTX
              && covers_regno_no_parallel_p (inner, test_regno))

    return covers_regno_no_parallel_p (dest, test_regno);
/* Utility function for dead_or_set_p to check an individual register.  Also
   called from flow.c.  */

dead_or_set_regno_p (rtx insn, unsigned int test_regno)

  /* See if there is a death note for something that includes TEST_REGNO.  */
  if (find_regno_note (insn, REG_DEAD, test_regno))

      && find_regno_fusage (insn, CLOBBER, test_regno))

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == COND_EXEC)
    pattern = COND_EXEC_CODE (pattern);

  if (GET_CODE (pattern) == SET)
    return covers_regno_p (SET_DEST (pattern), test_regno);
  else if (GET_CODE (pattern) == PARALLEL)
      for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
          rtx body = XVECEXP (pattern, 0, i);

          if (GET_CODE (body) == COND_EXEC)
            body = COND_EXEC_CODE (body);

          if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
              && covers_regno_p (SET_DEST (body), test_regno))
/* Return the reg-note of kind KIND in insn INSN, if there is one.
   If DATUM is nonzero, look for one whose datum is DATUM.  */

find_reg_note (rtx insn, enum reg_note kind, rtx datum)

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))

      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == kind)

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
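/* Illustrative sketch (assumed helper, not original code): a REG_EQUAL note,
   when present, describes the value an insn stores, and find_reg_note is the
   standard way to fetch it.  */

static rtx
example_known_equal_value (rtx insn)
{
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);

  return note ? XEXP (note, 0) : NULL_RTX;
}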
/* Return the reg-note of kind KIND in insn INSN which applies to register
   number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
   the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
   it might be the case that the note overlaps REGNO.  */

find_regno_note (rtx insn, enum reg_note kind, unsigned int regno)

  /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
  if (! INSN_P (insn))

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == kind
        /* Verify that it is a register, so that scratch and MEM won't cause a
           problem here.  */
        && REG_P (XEXP (link, 0))
        && REGNO (XEXP (link, 0)) <= regno
        && ((REGNO (XEXP (link, 0))
             + (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
                : hard_regno_nregs[REGNO (XEXP (link, 0))]
                                  [GET_MODE (XEXP (link, 0))]))
/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
   has such a note.  */

find_reg_equal_equiv_note (rtx insn)

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_EQUAL
        || REG_NOTE_KIND (link) == REG_EQUIV)
        if (single_set (insn) == 0)
/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

find_reg_fusage (rtx insn, enum rtx_code code, rtx datum)

  /* If it's not a CALL_INSN, it can't possibly have a
     CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */

      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link;
           link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == code
            && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))

      unsigned int regno = REGNO (datum);

      /* CALL_INSN_FUNCTION_USAGE information cannot contain references
         to pseudo registers, so don't bother checking.  */

      if (regno < FIRST_PSEUDO_REGISTER)
          unsigned int end_regno
            = regno + hard_regno_nregs[regno][GET_MODE (datum)];

          for (i = regno; i < end_regno; i++)
            if (find_regno_fusage (insn, code, i))
/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
   in the CALL_INSN_FUNCTION_USAGE information of INSN.  */

find_regno_fusage (rtx insn, enum rtx_code code, unsigned int regno)

  /* CALL_INSN_FUNCTION_USAGE information cannot contain references
     to pseudo registers, so don't bother checking.  */

  if (regno >= FIRST_PSEUDO_REGISTER

  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      unsigned int regnote;

      if (GET_CODE (op = XEXP (link, 0)) == code
          && REG_P (reg = XEXP (op, 0))
          && (regnote = REGNO (reg)) <= regno
          && regnote + hard_regno_nregs[regnote][GET_MODE (reg)] > regno)
/* Return true if INSN is a call to a pure function.  */

pure_call_p (rtx insn)

  if (!CALL_P (insn) || ! CONST_OR_PURE_CALL_P (insn))

  /* Look for the note that differentiates const and pure functions.  */
  for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
      if (GET_CODE (u = XEXP (link, 0)) == USE
          && MEM_P (m = XEXP (u, 0)) && GET_MODE (m) == BLKmode
          && GET_CODE (XEXP (m, 0)) == SCRATCH)
/* Remove register note NOTE from the REG_NOTES of INSN.  */

remove_note (rtx insn, rtx note)

  if (note == NULL_RTX)

  if (REG_NOTES (insn) == note)
      REG_NOTES (insn) = XEXP (note, 1);

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 1) == note)
        XEXP (link, 1) = XEXP (note, 1);
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   return 1 if it is found.  A simple equality test is used to determine if
   NODE matches.  */

in_expr_list_p (rtx listp, rtx node)

  for (x = listp; x; x = XEXP (x, 1))
    if (node == XEXP (x, 0))

/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
   remove that entry from the list if it is found.

   A simple equality test is used to determine if NODE matches.  */

remove_node_from_expr_list (rtx node, rtx *listp)
  rtx prev = NULL_RTX;

      if (node == XEXP (temp, 0))
          /* Splice the node out of the list.  */
            XEXP (prev, 1) = XEXP (temp, 1);
            *listp = XEXP (temp, 1);

      temp = XEXP (temp, 1);
/* Nonzero if X contains any volatile instructions.  These are instructions
   which may cause unpredictable machine state, and thus no instructions
   should be moved or combined across them.  This includes
   only volatile asms and UNSPEC_VOLATILE instructions.  */

volatile_insn_p (rtx x)

  code = GET_CODE (x);

    case UNSPEC_VOLATILE:
   /* case TRAP_IF: This isn't clear yet.  */

      if (MEM_VOLATILE_P (x))

  /* Recursively scan the operands of this expression.  */

    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (volatile_insn_p (XEXP (x, i)))
        else if (fmt[i] == 'E')
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_insn_p (XVECEXP (x, i, j)))
/* Nonzero if X contains any volatile memory references,
   UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */

volatile_refs_p (rtx x)

  code = GET_CODE (x);

    case UNSPEC_VOLATILE:

      if (MEM_VOLATILE_P (x))

  /* Recursively scan the operands of this expression.  */

    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (volatile_refs_p (XEXP (x, i)))
        else if (fmt[i] == 'E')
            for (j = 0; j < XVECLEN (x, i); j++)
              if (volatile_refs_p (XVECEXP (x, i, j)))
/* Similar to above, except that it also rejects register pre- and post-
   incrementing.  */

side_effects_p (rtx x)

  code = GET_CODE (x);

      /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
         when some combination can't be done.  If we see one, don't think
         that we can simplify the expression.  */
      return (GET_MODE (x) != VOIDmode);

    case UNSPEC_VOLATILE:
   /* case TRAP_IF: This isn't clear yet.  */

      if (MEM_VOLATILE_P (x))

  /* Recursively scan the operands of this expression.  */

    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        if (side_effects_p (XEXP (x, i)))
        else if (fmt[i] == 'E')
            for (j = 0; j < XVECLEN (x, i); j++)
              if (side_effects_p (XVECEXP (x, i, j)))
enum may_trap_p_flags
{
  MTP_UNALIGNED_MEMS = 1,
  MTP_AFTER_MOVE = 2
};

/* Return nonzero if evaluating rtx X might cause a trap.
   (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for
   unaligned memory accesses on strict alignment machines.  If
   (FLAGS & AFTER_MOVE) is true, returns nonzero even in case the expression
   cannot trap at its current location, but it might become trapping if moved
   elsewhere.  */

may_trap_p_1 (rtx x, unsigned flags)
  bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0;

  code = GET_CODE (x);

      /* Handle these cases quickly.  */

    case UNSPEC_VOLATILE:
      return MEM_VOLATILE_P (x);

      /* Memory ref can trap unless it's a static var or a stack slot.  */
      if (/* MEM_NOTRAP_P only relates to the actual position of the memory
             reference; moving it out of condition might cause its address
             become invalid.  */
          !(flags & MTP_AFTER_MOVE)
          && (!STRICT_ALIGNMENT || !unaligned_mems))

        rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems);

      /* Division by a non-constant might trap.  */
      if (HONOR_SNANS (GET_MODE (x)))
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
        return flag_trapping_math;
      if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))

      /* An EXPR_LIST is used to represent a function call.  This
         certainly may trap.  */

      /* Some floating point comparisons may trap.  */
      if (!flag_trapping_math)
      /* ??? There is no machine independent way to check for tests that trap
         when COMPARE is used, though many targets do make this distinction.
         For instance, sparc uses CCFPE for compares which generate exceptions
         and CCFP for compares which do not generate exceptions.  */
      if (HONOR_NANS (GET_MODE (x)))
      /* But often the compare has some CC mode, so check operand
         modes as well.  */
      if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
          || HONOR_NANS (GET_MODE (XEXP (x, 1))))

      if (HONOR_SNANS (GET_MODE (x)))
      /* Often comparison is CC mode, so check operand modes.  */
      if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
          || HONOR_SNANS (GET_MODE (XEXP (x, 1))))

      /* Conversion of floating point might trap.  */
      if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))

      /* These operations don't trap even with floating point.  */

      /* Any floating arithmetic may trap.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
          && flag_trapping_math)

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (may_trap_p_1 (XEXP (x, i), flags))
      else if (fmt[i] == 'E')
          for (j = 0; j < XVECLEN (x, i); j++)
            if (may_trap_p_1 (XVECEXP (x, i, j), flags))
/* Return nonzero if evaluating rtx X might cause a trap.  */

  return may_trap_p_1 (x, 0);

/* Return nonzero if evaluating rtx X might cause a trap, when the expression
   is moved from its current location by some optimization.  */

may_trap_after_code_motion_p (rtx x)
  return may_trap_p_1 (x, MTP_AFTER_MOVE);

/* Same as above, but additionally return nonzero if evaluating rtx X might
   cause a fault.  We define a fault for the purpose of this function as an
   erroneous execution condition that cannot be encountered during the normal
   execution of a valid program; the typical example is an unaligned memory
   access on a strict alignment machine.  The compiler guarantees that it
   doesn't generate code that will fault from a valid program, but this
   guarantee doesn't mean anything for individual instructions.  Consider
   the following example:

      struct S { int d; union { char *cp; int *ip; }; };

      int foo(struct S *s)
      {
        if (s->d == 1)
          return *s->ip;
        else
          return *s->cp;
      }

   on a strict alignment machine.  In a valid program, foo will never be
   invoked on a structure for which d is equal to 1 and the underlying
   unique field of the union not aligned on a 4-byte boundary, but the
   expression *s->ip might cause a fault if considered individually.

   At the RTL level, potentially problematic expressions will almost always
   verify may_trap_p; for example, the above dereference can be emitted as
   (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
   However, suppose that foo is inlined in a caller that causes s->cp to
   point to a local character variable and guarantees that s->d is not set
   to 1; foo may have been effectively translated into pseudo-RTL as:

      (set (reg:SI) (mem:SI (%fp - 7)))
      (set (reg:QI) (mem:QI (%fp - 7)))

   Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
   memory reference to a stack slot, but it will certainly cause a fault
   on a strict alignment machine.  */

may_trap_or_fault_p (rtx x)
  return may_trap_p_1 (x, MTP_UNALIGNED_MEMS);
/* Return nonzero if X contains a comparison that is not either EQ or NE,
   i.e., an inequality.  */

inequality_comparisons_p (rtx x)

  enum rtx_code code = GET_CODE (x);

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
      if (inequality_comparisons_p (XEXP (x, i)))
      else if (fmt[i] == 'E')
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (inequality_comparisons_p (XVECEXP (x, i, j)))
/* Replace any occurrence of FROM in X with TO.  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.  */

replace_rtx (rtx x, rtx from, rtx to)

  /* The following prevents loops from occurring when we change MEM in
     CONST_DOUBLE onto the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)

  /* Allow this function to make replacements in EXPR_LISTs.  */

  if (GET_CODE (x) == SUBREG)
      rtx new = replace_rtx (SUBREG_REG (x), from, to);

      if (GET_CODE (new) == CONST_INT)
          x = simplify_subreg (GET_MODE (x), new,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));

        SUBREG_REG (x) = new;
  else if (GET_CODE (x) == ZERO_EXTEND)
      rtx new = replace_rtx (XEXP (x, 0), from, to);

      if (GET_CODE (new) == CONST_INT)
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new, GET_MODE (XEXP (x, 0)));

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
        XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
/* Replace occurrences of the old label in *X with the new one.
   DATA is a REPLACE_LABEL_DATA containing the old and new labels.  */

replace_label (rtx *x, void *data)

  rtx old_label = ((replace_label_data *) data)->r1;
  rtx new_label = ((replace_label_data *) data)->r2;
  bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;

  if (GET_CODE (l) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (l))
      rtx c = get_pool_constant (l);
      if (rtx_referenced_p (old_label, c))
          replace_label_data *d = (replace_label_data *) data;

          /* Create a copy of constant C; replace the label inside
             but do not update LABEL_NUSES because uses in constant pool
             are not counted.  */
          new_c = copy_rtx (c);
          d->update_label_nuses = false;
          for_each_rtx (&new_c, replace_label, data);
          d->update_label_nuses = update_label_nuses;

          /* Add the new constant NEW_C to constant pool and replace
             the old reference to constant by new reference.  */
          new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
          *x = replace_rtx (l, l, new_l);

  /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
     field.  This is not handled by for_each_rtx because it doesn't
     handle unprinted ('0') fields.  */
  if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
    JUMP_LABEL (l) = new_label;

  if ((GET_CODE (l) == LABEL_REF
       || GET_CODE (l) == INSN_LIST)
      && XEXP (l, 0) == old_label)
      XEXP (l, 0) = new_label;
      if (update_label_nuses)
          ++LABEL_NUSES (new_label);
          --LABEL_NUSES (old_label);
/* When *BODY is equal to X or X is directly referenced by *BODY
   return nonzero, thus FOR_EACH_RTX stops traversing and returns nonzero
   too, otherwise FOR_EACH_RTX continues traversing *BODY.  */

rtx_referenced_p_1 (rtx *body, void *x)

  if (*body == NULL_RTX)
    return y == NULL_RTX;

  /* Return true if a label_ref *BODY refers to label Y.  */
  if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
    return XEXP (*body, 0) == y;

  /* If *BODY is a reference to pool constant traverse the constant.  */
  if (GET_CODE (*body) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (*body))
    return rtx_referenced_p (y, get_pool_constant (*body));

  /* By default, compare the RTL expressions.  */
  return rtx_equal_p (*body, y);

/* Return true if X is referenced in BODY.  */

rtx_referenced_p (rtx x, rtx body)
  return for_each_rtx (&body, rtx_referenced_p_1, x);
/* If INSN is a tablejump return true and store the label (before jump table) to
   *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */

tablejump_p (rtx insn, rtx *labelp, rtx *tablep)

      && (label = JUMP_LABEL (insn)) != NULL_RTX
      && (table = next_active_insn (label)) != NULL_RTX
      && (GET_CODE (PATTERN (table)) == ADDR_VEC
          || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
/* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
   constant that is not in the constant pool and not in the condition
   of an IF_THEN_ELSE.  */

computed_jump_p_1 (rtx x)
  enum rtx_code code = GET_CODE (x);

      return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
                && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));

      return (computed_jump_p_1 (XEXP (x, 1))
              || computed_jump_p_1 (XEXP (x, 2)));

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          && computed_jump_p_1 (XEXP (x, i)))
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (computed_jump_p_1 (XVECEXP (x, i, j)))
/* Return nonzero if INSN is an indirect jump (aka computed jump).

   Tablejumps and casesi insns are not considered indirect jumps;
   we can recognize them by a (use (label_ref)).  */

computed_jump_p (rtx insn)

      rtx pat = PATTERN (insn);

      if (find_reg_note (insn, REG_LABEL, NULL_RTX))
      else if (GET_CODE (pat) == PARALLEL)
          int len = XVECLEN (pat, 0);
          int has_use_labelref = 0;

          for (i = len - 1; i >= 0; i--)
            if (GET_CODE (XVECEXP (pat, 0, i)) == USE
                && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
              has_use_labelref = 1;

          if (! has_use_labelref)
            for (i = len - 1; i >= 0; i--)
              if (GET_CODE (XVECEXP (pat, 0, i)) == SET
                  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
                  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))

      else if (GET_CODE (pat) == SET
               && SET_DEST (pat) == pc_rtx
               && computed_jump_p_1 (SET_SRC (pat)))
/* Optimized loop of for_each_rtx, trying to avoid useless recursive
   calls.  Processes the subexpressions of EXP and passes them to F.  */

for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)

  const char *format = GET_RTX_FORMAT (GET_CODE (exp));

  for (; format[n] != '\0'; n++)

          result = (*f) (x, data);
            /* Do not traverse sub-expressions.  */
          else if (result != 0)
            /* Stop the traversal.  */

            /* There are no sub-expressions.  */

          i = non_rtx_starting_operands[GET_CODE (*x)];

              result = for_each_rtx_1 (*x, i, f, data);

          if (XVEC (exp, n) == 0)
          for (j = 0; j < XVECLEN (exp, n); ++j)
              x = &XVECEXP (exp, n, j);
              result = (*f) (x, data);
                /* Do not traverse sub-expressions.  */
              else if (result != 0)
                /* Stop the traversal.  */

                /* There are no sub-expressions.  */

              i = non_rtx_starting_operands[GET_CODE (*x)];

                  result = for_each_rtx_1 (*x, i, f, data);

          /* Nothing to do.  */
/* Traverse X via depth-first search, calling F for each
   sub-expression (including X itself).  F is also passed the DATA.
   If F returns -1, do not traverse sub-expressions, but continue
   traversing the rest of the tree.  If F ever returns any other
   nonzero value, stop the traversal, and return the value returned
   by F.  Otherwise, return 0.  This function does not traverse inside
   tree structure that contains RTX_EXPRs, or into sub-expressions
   whose format code is `0' since it is not known whether or not those
   codes are actually RTL.

   This routine is very general, and could (should?) be used to
   implement many of the other routines in this file.  */

for_each_rtx (rtx *x, rtx_function f, void *data)

  result = (*f) (x, data);
    /* Do not traverse sub-expressions.  */
  else if (result != 0)
    /* Stop the traversal.  */

    /* There are no sub-expressions.  */

  i = non_rtx_starting_operands[GET_CODE (*x)];

  return for_each_rtx_1 (*x, i, f, data);
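/* Illustrative sketch (hypothetical callback, not original code): for_each_rtx
   walks every sub-rtx, so counting the MEMs contained in an expression only
   needs a trivial callback.  */

static int
example_count_mems_1 (rtx *x, void *data)
{
  if (MEM_P (*x))
    ++*(int *) data;
  return 0;  /* Zero means: keep traversing.  */
}

static int
example_count_mems (rtx x)
{
  int count = 0;

  for_each_rtx (&x, example_count_mems_1, &count);
  return count;
}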
/* Searches X for any reference to REGNO, returning the rtx of the
   reference found if any.  Otherwise, returns NULL_RTX.  */

regno_use_in (unsigned int regno, rtx x)

  if (REG_P (x) && REGNO (x) == regno)

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
      if ((tem = regno_use_in (regno, XEXP (x, i))))
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
/* Return a value indicating whether OP, an operand of a commutative
   operation, is preferred as the first or second operand.  The higher
   the value, the stronger the preference for being the first operand.
   We use negative values to indicate a preference for the first operand
   and positive values for the second operand.  */

commutative_operand_precedence (rtx op)
  enum rtx_code code = GET_CODE (op);

  /* Constants always come second.  Prefer "nice" constants.  */
  if (code == CONST_INT)
  if (code == CONST_DOUBLE)

  op = avoid_constant_pool_reference (op);
  code = GET_CODE (op);

  switch (GET_RTX_CLASS (code))
      if (code == CONST_INT)
      if (code == CONST_DOUBLE)

      /* SUBREGs of objects should come second.  */
      if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))

      if (!CONSTANT_P (op))
      /* As for RTX_CONST_OBJ.  */

      /* Complex expressions should be the first, so decrease priority
         of objects.  */

    case RTX_COMM_ARITH:
      /* Prefer operands that are themselves commutative to be first.
         This helps to make things linear.  In particular,
         (and (and (reg) (reg)) (not (reg))) is canonical.  */

      /* If only one operand is a binary expression, it will be the first
         operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
         is canonical, although it will usually be further simplified.  */

      /* Then prefer NEG and NOT.  */
      if (code == NEG || code == NOT)

/* Return 1 iff it is necessary to swap operands of commutative operation
   in order to canonicalize expression.  */

swap_commutative_operands_p (rtx x, rtx y)
  return (commutative_operand_precedence (x)
          < commutative_operand_precedence (y));
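/* Illustrative sketch (assumed helper, not original code): callers typically
   use swap_commutative_operands_p to put the operands of a commutative
   operation into canonical order before constructing the rtx.  */

static void
example_canonicalize_comm_operands (rtx *op0, rtx *op1)
{
  if (swap_commutative_operands_p (*op0, *op1))
    {
      rtx tmp = *op0;
      *op0 = *op1;
      *op1 = tmp;
    }
}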
/* Return 1 if X is an autoincrement side effect and the register is
   not the stack pointer.  */

  switch (GET_CODE (x))
      /* There are no REG_INC notes for SP.  */
      if (XEXP (x, 0) != stack_pointer_rtx)
/* Return nonzero if IN contains a piece of rtl that has the address LOC.  */

loc_mentioned_in_p (rtx *loc, rtx in)

  enum rtx_code code = GET_CODE (in);
  const char *fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      if (loc == &in->u.fld[i].rt_rtx)

        if (loc_mentioned_in_p (loc, XEXP (in, i)))
      else if (fmt[i] == 'E')
        for (j = XVECLEN (in, i) - 1; j >= 0; j--)
          if (loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
/* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
   and SUBREG_BYTE, return the bit offset where the subreg begins
   (counting from the least significant bit of the operand).  */

unsigned int
subreg_lsb_1 (enum machine_mode outer_mode,
              enum machine_mode inner_mode,
              unsigned int subreg_byte)
{
  unsigned int bitpos;
  unsigned int byte;
  unsigned int word;

  /* A paradoxical subreg begins at bit position 0.  */
  if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
    return 0;

  if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
    /* If the subreg crosses a word boundary ensure that
       it also begins and ends on a word boundary.  */
    gcc_assert (!((subreg_byte % UNITS_PER_WORD
                  + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
                  && (subreg_byte % UNITS_PER_WORD
                      || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));

  if (WORDS_BIG_ENDIAN)
    word = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
  else
    word = subreg_byte / UNITS_PER_WORD;
  bitpos = word * BITS_PER_WORD;

  if (BYTES_BIG_ENDIAN)
    byte = (GET_MODE_SIZE (inner_mode)
            - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
  else
    byte = subreg_byte % UNITS_PER_WORD;
  bitpos += byte * BITS_PER_UNIT;

  return bitpos;
}
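/* Worked example (assuming a little-endian target with UNITS_PER_WORD == 4):
   for outer_mode = SImode (4 bytes), inner_mode = DImode (8 bytes) and
   subreg_byte = 4, we get word = 4 / 4 = 1, bitpos = 32, byte = 4 % 4 = 0,
   so the subreg starts at bit 32 of the DImode value.  */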
/* Given a subreg X, return the bit offset where the subreg begins
   (counting from the least significant bit of the reg).  */

unsigned int
subreg_lsb (rtx x)
{
  return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
                       SUBREG_BYTE (x));
}
2913 /* This function returns the regno offset of a subreg expression.
2914 xregno - A regno of an inner hard subreg_reg (or what will become one).
2915 xmode - The mode of xregno.
2916 offset - The byte offset.
2917 ymode - The mode of a top level SUBREG (or what may become one).
2918 RETURN - The regno offset which would be used. */
2920 subreg_regno_offset (unsigned int xregno
, enum machine_mode xmode
,
2921 unsigned int offset
, enum machine_mode ymode
)
2923 int nregs_xmode
, nregs_ymode
, nregs_xmode_unit_int
;
2924 int mode_multiple
, nregs_multiple
;
2926 enum machine_mode xmode_unit
, xmode_unit_int
;
2928 gcc_assert (xregno
< FIRST_PSEUDO_REGISTER
);
2930 if (GET_MODE_INNER (xmode
) == VOIDmode
)
2933 xmode_unit
= GET_MODE_INNER (xmode
);
2935 if (FLOAT_MODE_P (xmode_unit
))
2937 xmode_unit_int
= int_mode_for_mode (xmode_unit
);
2938 if (xmode_unit_int
== BLKmode
)
2939 /* It's probably bad to be here; a port should have an integer mode
2940 that's the same size as anything of which it takes a SUBREG. */
2941 xmode_unit_int
= xmode_unit
;
2944 xmode_unit_int
= xmode_unit
;
2946 nregs_xmode_unit_int
= hard_regno_nregs
[xregno
][xmode_unit_int
];
2948 /* Adjust nregs_xmode to allow for 'holes'. */
2949 if (nregs_xmode_unit_int
!= hard_regno_nregs
[xregno
][xmode_unit
])
2950 nregs_xmode
= nregs_xmode_unit_int
* GET_MODE_NUNITS (xmode
);
2952 nregs_xmode
= hard_regno_nregs
[xregno
][xmode
];
2954 nregs_ymode
= hard_regno_nregs
[xregno
][ymode
];
2956 /* If this is a big endian paradoxical subreg, which uses more actual
2957 hard registers than the original register, we must return a negative
2958 offset so that we find the proper highpart of the register. */
2960 && nregs_ymode
> nregs_xmode
2961 && (GET_MODE_SIZE (ymode
) > UNITS_PER_WORD
2962 ? WORDS_BIG_ENDIAN
: BYTES_BIG_ENDIAN
))
2963 return nregs_xmode
- nregs_ymode
;
2965 if (offset
== 0 || nregs_xmode
== nregs_ymode
)
2968 /* Size of ymode must not be greater than the size of xmode. */
2969 mode_multiple
= GET_MODE_SIZE (xmode
) / GET_MODE_SIZE (ymode
);
2970 gcc_assert (mode_multiple
!= 0);
2972 y_offset
= offset
/ GET_MODE_SIZE (ymode
);
2973 nregs_multiple
= nregs_xmode
/ nregs_ymode
;
2974 return (y_offset
/ (mode_multiple
/ nregs_multiple
)) * nregs_ymode
;
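/* Worked example (assuming a 32-bit target where DImode occupies two hard
   registers and SImode one): for xmode = DImode, ymode = SImode and
   offset = 4, nregs_xmode = 2, nregs_ymode = 1, mode_multiple = 8 / 4 = 2,
   y_offset = 4 / 4 = 1 and nregs_multiple = 2, so the returned regno
   offset is (1 / (2 / 2)) * 1 = 1, i.e. the second register of the pair.  */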
2977 /* This function returns true when the offset is representable via
2978 subreg_offset in the given regno.
2979 xregno - A regno of an inner hard subreg_reg (or what will become one).
2980 xmode - The mode of xregno.
2981 offset - The byte offset.
2982 ymode - The mode of a top level SUBREG (or what may become one).
2983 RETURN - Whether the offset is representable. */
2985 subreg_offset_representable_p (unsigned int xregno
, enum machine_mode xmode
,
2986 unsigned int offset
, enum machine_mode ymode
)
2988 int nregs_xmode
, nregs_ymode
, nregs_xmode_unit
, nregs_xmode_unit_int
;
2989 int mode_multiple
, nregs_multiple
;
2991 enum machine_mode xmode_unit
, xmode_unit_int
;
2993 gcc_assert (xregno
< FIRST_PSEUDO_REGISTER
);
2995 if (GET_MODE_INNER (xmode
) == VOIDmode
)
2998 xmode_unit
= GET_MODE_INNER (xmode
);
3000 if (FLOAT_MODE_P (xmode_unit
))
3002 xmode_unit_int
= int_mode_for_mode (xmode_unit
);
3003 if (xmode_unit_int
== BLKmode
)
3004 /* It's probably bad to be here; a port should have an integer mode
3005 that's the same size as anything of which it takes a SUBREG. */
3006 xmode_unit_int
= xmode_unit
;
3009 xmode_unit_int
= xmode_unit
;
3011 nregs_xmode_unit
= hard_regno_nregs
[xregno
][xmode_unit
];
3012 nregs_xmode_unit_int
= hard_regno_nregs
[xregno
][xmode_unit_int
];
3014 /* If there are holes in a non-scalar mode in registers, we expect
3015 that it is made up of its units concatenated together. */
3016 if (nregs_xmode_unit
!= nregs_xmode_unit_int
)
3018 gcc_assert (nregs_xmode_unit
* GET_MODE_NUNITS (xmode
)
3019 == hard_regno_nregs
[xregno
][xmode
]);
3021 /* You can only ask for a SUBREG of a value with holes in the middle
3022 if you don't cross the holes. (Such a SUBREG should be done by
3023 picking a different register class, or doing it in memory if
3024 necessary.) An example of a value with holes is XCmode on 32-bit
3025 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3026 3 for each part, but in memory it's two 128-bit parts.
     Padding is assumed to be at the end (not necessarily the 'high part')
     of each unit.  */
3029 if (nregs_xmode_unit
!= nregs_xmode_unit_int
3030 && (offset
/ GET_MODE_SIZE (xmode_unit_int
) + 1
3031 < GET_MODE_NUNITS (xmode
))
3032 && (offset
/ GET_MODE_SIZE (xmode_unit_int
)
3033 != ((offset
+ GET_MODE_SIZE (ymode
) - 1)
3034 / GET_MODE_SIZE (xmode_unit_int
))))
3037 nregs_xmode
= nregs_xmode_unit_int
* GET_MODE_NUNITS (xmode
);
3040 nregs_xmode
= hard_regno_nregs
[xregno
][xmode
];
3042 nregs_ymode
= hard_regno_nregs
[xregno
][ymode
];
3044 /* Paradoxical subregs are otherwise valid. */
3046 && nregs_ymode
> nregs_xmode
3047 && (GET_MODE_SIZE (ymode
) > UNITS_PER_WORD
3048 ? WORDS_BIG_ENDIAN
: BYTES_BIG_ENDIAN
))
3051 /* Lowpart subregs are otherwise valid. */
3052 if (offset
== subreg_lowpart_offset (ymode
, xmode
))
3055 /* This should always pass, otherwise we don't know how to verify
3056 the constraint. These conditions may be relaxed but
3057 subreg_regno_offset would need to be redesigned. */
3058 gcc_assert ((GET_MODE_SIZE (xmode
) % GET_MODE_SIZE (ymode
)) == 0);
3059 gcc_assert ((nregs_xmode
% nregs_ymode
) == 0);
3061 /* The XMODE value can be seen as a vector of NREGS_XMODE
3062 values. The subreg must represent a lowpart of given field.
3063 Compute what field it is. */
3064 offset
-= subreg_lowpart_offset (ymode
,
3065 mode_for_size (GET_MODE_BITSIZE (xmode
)
3069 /* Size of ymode must not be greater than the size of xmode. */
3070 mode_multiple
= GET_MODE_SIZE (xmode
) / GET_MODE_SIZE (ymode
);
3071 gcc_assert (mode_multiple
!= 0);
3073 y_offset
= offset
/ GET_MODE_SIZE (ymode
);
3074 nregs_multiple
= nregs_xmode
/ nregs_ymode
;
3076 gcc_assert ((offset
% GET_MODE_SIZE (ymode
)) == 0);
3077 gcc_assert ((mode_multiple
% nregs_multiple
) == 0);
3079 return (!(y_offset
% (mode_multiple
/ nregs_multiple
)));
3082 /* Return the final regno that a subreg expression refers to. */
3084 subreg_regno (rtx x
)
3087 rtx subreg
= SUBREG_REG (x
);
3088 int regno
= REGNO (subreg
);
3090 ret
= regno
+ subreg_regno_offset (regno
,
3097 struct parms_set_data
3103 /* Helper function for noticing stores to parameter registers. */
3105 parms_set (rtx x
, rtx pat ATTRIBUTE_UNUSED
, void *data
)
3107 struct parms_set_data
*d
= data
;
3108 if (REG_P (x
) && REGNO (x
) < FIRST_PSEUDO_REGISTER
3109 && TEST_HARD_REG_BIT (d
->regs
, REGNO (x
)))
3111 CLEAR_HARD_REG_BIT (d
->regs
, REGNO (x
));
3116 /* Look backward for first parameter to be loaded.
3117 Note that loads of all parameters will not necessarily be
3118 found if CSE has eliminated some of them (e.g., an argument
3119 to the outer function is passed down as a parameter).
3120 Do not skip BOUNDARY. */
3122 find_first_parameter_load (rtx call_insn
, rtx boundary
)
3124 struct parms_set_data parm
;
3125 rtx p
, before
, first_set
;
3127 /* Since different machines initialize their parameter registers
3128 in different orders, assume nothing. Collect the set of all
3129 parameter registers. */
3130 CLEAR_HARD_REG_SET (parm
.regs
);
3132 for (p
= CALL_INSN_FUNCTION_USAGE (call_insn
); p
; p
= XEXP (p
, 1))
3133 if (GET_CODE (XEXP (p
, 0)) == USE
3134 && REG_P (XEXP (XEXP (p
, 0), 0)))
3136 gcc_assert (REGNO (XEXP (XEXP (p
, 0), 0)) < FIRST_PSEUDO_REGISTER
);
        /* We only care about registers which can hold function
           arguments.  */
3140 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p
, 0), 0))))
3143 SET_HARD_REG_BIT (parm
.regs
, REGNO (XEXP (XEXP (p
, 0), 0)));
3147 first_set
= call_insn
;
3149 /* Search backward for the first set of a register in this set. */
3150 while (parm
.nregs
&& before
!= boundary
)
3152 before
= PREV_INSN (before
);
3154 /* It is possible that some loads got CSEed from one call to
3155 another. Stop in that case. */
3156 if (CALL_P (before
))
          /* Our caller must either ensure that we will find all sets
             (in case the code has not been optimized yet), or take care
             of possible labels by setting BOUNDARY to the preceding
             label.  */
3163 if (LABEL_P (before
))
3165 gcc_assert (before
== boundary
);
3169 if (INSN_P (before
))
3171 int nregs_old
= parm
.nregs
;
3172 note_stores (PATTERN (before
), parms_set
, &parm
);
3173 /* If we found something that did not set a parameter reg,
3174 we're done. Do not keep going, as that might result
3175 in hoisting an insn before the setting of a pseudo
3176 that is used by the hoisted insn. */
3177 if (nregs_old
!= parm
.nregs
)
/* Return true if we should avoid inserting code between INSN and the
   preceding call instruction.  */
3190 keep_with_call_p (rtx insn
)
3194 if (INSN_P (insn
) && (set
= single_set (insn
)) != NULL
)
3196 if (REG_P (SET_DEST (set
))
3197 && REGNO (SET_DEST (set
)) < FIRST_PSEUDO_REGISTER
3198 && fixed_regs
[REGNO (SET_DEST (set
))]
3199 && general_operand (SET_SRC (set
), VOIDmode
))
3201 if (REG_P (SET_SRC (set
))
3202 && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set
)))
3203 && REG_P (SET_DEST (set
))
3204 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
3206 /* There may be a stack pop just after the call and before the store
3207 of the return register. Search for the actual store when deciding
3208 if we can break or not. */
3209 if (SET_DEST (set
) == stack_pointer_rtx
)
3211 rtx i2
= next_nonnote_insn (insn
);
3212 if (i2
&& keep_with_call_p (i2
))
3219 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3220 to non-complex jumps. That is, direct unconditional, conditional,
3221 and tablejumps, but not computed jumps or returns. It also does
3222 not apply to the fallthru case of a conditional jump. */
3225 label_is_jump_target_p (rtx label
, rtx jump_insn
)
3227 rtx tmp
= JUMP_LABEL (jump_insn
);
3232 if (tablejump_p (jump_insn
, NULL
, &tmp
))
3234 rtvec vec
= XVEC (PATTERN (tmp
),
3235 GET_CODE (PATTERN (tmp
)) == ADDR_DIFF_VEC
);
3236 int i
, veclen
= GET_NUM_ELEM (vec
);
3238 for (i
= 0; i
< veclen
; ++i
)
3239 if (XEXP (RTVEC_ELT (vec
, i
), 0) == label
)
3247 /* Return an estimate of the cost of computing rtx X.
3248 One use is in cse, to decide which expression to keep in the hash table.
3249 Another is in rtl generation, to pick the cheapest way to multiply.
3250 Other uses like the latter are expected in the future. */
3253 rtx_cost (rtx x
, enum rtx_code outer_code ATTRIBUTE_UNUSED
)
3263 /* Compute the default costs of certain things.
3264 Note that targetm.rtx_costs can override the defaults. */
3266 code
= GET_CODE (x
);
3270 total
= COSTS_N_INSNS (5);
3276 total
= COSTS_N_INSNS (7);
3279 /* Used in combine.c as a marker. */
3283 total
= COSTS_N_INSNS (1);
3293 /* If we can't tie these modes, make this expensive. The larger
3294 the mode, the more expensive it is. */
3295 if (! MODES_TIEABLE_P (GET_MODE (x
), GET_MODE (SUBREG_REG (x
))))
3296 return COSTS_N_INSNS (2
3297 + GET_MODE_SIZE (GET_MODE (x
)) / UNITS_PER_WORD
);
3301 if (targetm
.rtx_costs (x
, code
, outer_code
, &total
))
3306 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3307 which is already in total. */
3309 fmt
= GET_RTX_FORMAT (code
);
3310 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
3312 total
+= rtx_cost (XEXP (x
, i
), code
);
3313 else if (fmt
[i
] == 'E')
3314 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
3315 total
+= rtx_cost (XVECEXP (x
, i
, j
), code
);
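/* Usage sketch (hypothetical caller, not from this file): a pass choosing
   between two equivalent expressions can compare their costs, e.g.

     if (rtx_cost (new_rtx, SET) < rtx_cost (old_rtx, SET))
       ... prefer new_rtx ...

   where SET is passed as the outer code because the expression will appear
   on the right-hand side of a SET.  */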
/* Return the cost of address expression X.
   Expect that X is a properly formed address reference.  */
3324 address_cost (rtx x
, enum machine_mode mode
)
  /* We may be asked for the cost of various unusual addresses, such as
     operands of push instructions.  It is not worthwhile to complicate
     the target hook with such cases.  */
3330 if (!memory_address_p (mode
, x
))
3333 return targetm
.address_cost (x
);
3336 /* If the target doesn't override, compute the cost as with arithmetic. */
3339 default_address_cost (rtx x
)
3341 return rtx_cost (x
, MEM
);
unsigned HOST_WIDE_INT
nonzero_bits (rtx x, enum machine_mode mode)
{
  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
}

unsigned int
num_sign_bit_copies (rtx x, enum machine_mode mode)
{
  return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
}
3357 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3358 It avoids exponential behavior in nonzero_bits1 when X has
3359 identical subexpressions on the first or the second level. */
3361 static unsigned HOST_WIDE_INT
3362 cached_nonzero_bits (rtx x
, enum machine_mode mode
, rtx known_x
,
3363 enum machine_mode known_mode
,
3364 unsigned HOST_WIDE_INT known_ret
)
3366 if (x
== known_x
&& mode
== known_mode
)
3369 /* Try to find identical subexpressions. If found call
3370 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3371 precomputed value for the subexpression as KNOWN_RET. */
3373 if (ARITHMETIC_P (x
))
3375 rtx x0
= XEXP (x
, 0);
3376 rtx x1
= XEXP (x
, 1);
3378 /* Check the first level. */
3380 return nonzero_bits1 (x
, mode
, x0
, mode
,
3381 cached_nonzero_bits (x0
, mode
, known_x
,
3382 known_mode
, known_ret
));
3384 /* Check the second level. */
3385 if (ARITHMETIC_P (x0
)
3386 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
3387 return nonzero_bits1 (x
, mode
, x1
, mode
,
3388 cached_nonzero_bits (x1
, mode
, known_x
,
3389 known_mode
, known_ret
));
3391 if (ARITHMETIC_P (x1
)
3392 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
3393 return nonzero_bits1 (x
, mode
, x0
, mode
,
3394 cached_nonzero_bits (x0
, mode
, known_x
,
3395 known_mode
, known_ret
));
3398 return nonzero_bits1 (x
, mode
, known_x
, known_mode
, known_ret
);
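/* Example of the sharing this buys (hypothetical rtx): for
   x = (plus (reg 100) (reg 100)), X0 and X1 are the same rtx, so the bits
   of (reg 100) are computed once and passed down as KNOWN_X/KNOWN_RET
   instead of being recomputed in both operand positions; that repeated
   recomputation is what would otherwise blow up exponentially on deeply
   shared expressions.  */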
3401 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3402 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3403 is less useful. We can't allow both, because that results in exponential
3404 run time recursion. There is a nullstone testcase that triggered
3405 this. This macro avoids accidental uses of num_sign_bit_copies. */
3406 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3408 /* Given an expression, X, compute which bits in X can be nonzero.
3409 We don't care about bits outside of those defined in MODE.
   For most X this is simply GET_MODE_MASK (MODE), but if X is
3412 an arithmetic operation, we can do better. */
3414 static unsigned HOST_WIDE_INT
3415 nonzero_bits1 (rtx x
, enum machine_mode mode
, rtx known_x
,
3416 enum machine_mode known_mode
,
3417 unsigned HOST_WIDE_INT known_ret
)
3419 unsigned HOST_WIDE_INT nonzero
= GET_MODE_MASK (mode
);
3420 unsigned HOST_WIDE_INT inner_nz
;
3422 unsigned int mode_width
= GET_MODE_BITSIZE (mode
);
3424 /* For floating-point values, assume all bits are needed. */
3425 if (FLOAT_MODE_P (GET_MODE (x
)) || FLOAT_MODE_P (mode
))
3428 /* If X is wider than MODE, use its mode instead. */
3429 if (GET_MODE_BITSIZE (GET_MODE (x
)) > mode_width
)
3431 mode
= GET_MODE (x
);
3432 nonzero
= GET_MODE_MASK (mode
);
3433 mode_width
= GET_MODE_BITSIZE (mode
);
3436 if (mode_width
> HOST_BITS_PER_WIDE_INT
)
3437 /* Our only callers in this case look for single bit values. So
3438 just return the mode mask. Those tests will then be false. */
3441 #ifndef WORD_REGISTER_OPERATIONS
3442 /* If MODE is wider than X, but both are a single word for both the host
3443 and target machines, we can compute this from which bits of the
3444 object might be nonzero in its own mode, taking into account the fact
3445 that on many CISC machines, accessing an object in a wider mode
3446 causes the high-order bits to become undefined. So they are
3447 not known to be zero. */
3449 if (GET_MODE (x
) != VOIDmode
&& GET_MODE (x
) != mode
3450 && GET_MODE_BITSIZE (GET_MODE (x
)) <= BITS_PER_WORD
3451 && GET_MODE_BITSIZE (GET_MODE (x
)) <= HOST_BITS_PER_WIDE_INT
3452 && GET_MODE_BITSIZE (mode
) > GET_MODE_BITSIZE (GET_MODE (x
)))
3454 nonzero
&= cached_nonzero_bits (x
, GET_MODE (x
),
3455 known_x
, known_mode
, known_ret
);
3456 nonzero
|= GET_MODE_MASK (mode
) & ~GET_MODE_MASK (GET_MODE (x
));
3461 code
= GET_CODE (x
);
3465 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3466 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3467 all the bits above ptr_mode are known to be zero. */
3468 if (POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
3470 nonzero
&= GET_MODE_MASK (ptr_mode
);
3473 /* Include declared information about alignment of pointers. */
3474 /* ??? We don't properly preserve REG_POINTER changes across
3475 pointer-to-integer casts, so we can't trust it except for
3476 things that we know must be pointers. See execute/960116-1.c. */
3477 if ((x
== stack_pointer_rtx
3478 || x
== frame_pointer_rtx
3479 || x
== arg_pointer_rtx
)
3480 && REGNO_POINTER_ALIGN (REGNO (x
)))
3482 unsigned HOST_WIDE_INT alignment
3483 = REGNO_POINTER_ALIGN (REGNO (x
)) / BITS_PER_UNIT
;
3485 #ifdef PUSH_ROUNDING
3486 /* If PUSH_ROUNDING is defined, it is possible for the
3487 stack to be momentarily aligned only to that amount,
3488 so we pick the least alignment. */
3489 if (x
== stack_pointer_rtx
&& PUSH_ARGS
)
3490 alignment
= MIN ((unsigned HOST_WIDE_INT
) PUSH_ROUNDING (1),
3494 nonzero
&= ~(alignment
- 1);
3498 unsigned HOST_WIDE_INT nonzero_for_hook
= nonzero
;
3499 rtx
new = rtl_hooks
.reg_nonzero_bits (x
, mode
, known_x
,
3500 known_mode
, known_ret
,
3504 nonzero_for_hook
&= cached_nonzero_bits (new, mode
, known_x
,
3505 known_mode
, known_ret
);
3507 return nonzero_for_hook
;
3511 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
3512 /* If X is negative in MODE, sign-extend the value. */
3513 if (INTVAL (x
) > 0 && mode_width
< BITS_PER_WORD
3514 && 0 != (INTVAL (x
) & ((HOST_WIDE_INT
) 1 << (mode_width
- 1))))
3515 return (INTVAL (x
) | ((HOST_WIDE_INT
) (-1) << mode_width
));
3521 #ifdef LOAD_EXTEND_OP
3522 /* In many, if not most, RISC machines, reading a byte from memory
3523 zeros the rest of the register. Noticing that fact saves a lot
3524 of extra zero-extends. */
3525 if (LOAD_EXTEND_OP (GET_MODE (x
)) == ZERO_EXTEND
)
3526 nonzero
&= GET_MODE_MASK (GET_MODE (x
));
3531 case UNEQ
: case LTGT
:
3532 case GT
: case GTU
: case UNGT
:
3533 case LT
: case LTU
: case UNLT
:
3534 case GE
: case GEU
: case UNGE
:
3535 case LE
: case LEU
: case UNLE
:
3536 case UNORDERED
: case ORDERED
:
3537 /* If this produces an integer result, we know which bits are set.
3538 Code here used to clear bits outside the mode of X, but that is
3540 /* Mind that MODE is the mode the caller wants to look at this
3541 operation in, and not the actual operation mode. We can wind
3542 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
3543 that describes the results of a vector compare. */
3544 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_INT
3545 && mode_width
<= HOST_BITS_PER_WIDE_INT
)
3546 nonzero
= STORE_FLAG_VALUE
;
3551 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3552 and num_sign_bit_copies. */
3553 if (num_sign_bit_copies (XEXP (x
, 0), GET_MODE (x
))
3554 == GET_MODE_BITSIZE (GET_MODE (x
)))
3558 if (GET_MODE_SIZE (GET_MODE (x
)) < mode_width
)
3559 nonzero
|= (GET_MODE_MASK (mode
) & ~GET_MODE_MASK (GET_MODE (x
)));
3564 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3565 and num_sign_bit_copies. */
3566 if (num_sign_bit_copies (XEXP (x
, 0), GET_MODE (x
))
3567 == GET_MODE_BITSIZE (GET_MODE (x
)))
3573 nonzero
&= (cached_nonzero_bits (XEXP (x
, 0), mode
,
3574 known_x
, known_mode
, known_ret
)
3575 & GET_MODE_MASK (mode
));
3579 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
3580 known_x
, known_mode
, known_ret
);
3581 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
3582 nonzero
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
3586 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
3587 Otherwise, show all the bits in the outer mode but not the inner
3589 inner_nz
= cached_nonzero_bits (XEXP (x
, 0), mode
,
3590 known_x
, known_mode
, known_ret
);
3591 if (GET_MODE (XEXP (x
, 0)) != VOIDmode
)
3593 inner_nz
&= GET_MODE_MASK (GET_MODE (XEXP (x
, 0)));
3595 & (((HOST_WIDE_INT
) 1
3596 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x
, 0))) - 1))))
3597 inner_nz
|= (GET_MODE_MASK (mode
)
3598 & ~GET_MODE_MASK (GET_MODE (XEXP (x
, 0))));
3601 nonzero
&= inner_nz
;
3605 nonzero
&= cached_nonzero_bits (XEXP (x
, 0), mode
,
3606 known_x
, known_mode
, known_ret
)
3607 & cached_nonzero_bits (XEXP (x
, 1), mode
,
3608 known_x
, known_mode
, known_ret
);
3612 case UMIN
: case UMAX
: case SMIN
: case SMAX
:
3614 unsigned HOST_WIDE_INT nonzero0
=
3615 cached_nonzero_bits (XEXP (x
, 0), mode
,
3616 known_x
, known_mode
, known_ret
);
3618 /* Don't call nonzero_bits for the second time if it cannot change
3620 if ((nonzero
& nonzero0
) != nonzero
)
3622 | cached_nonzero_bits (XEXP (x
, 1), mode
,
3623 known_x
, known_mode
, known_ret
);
3627 case PLUS
: case MINUS
:
3629 case DIV
: case UDIV
:
3630 case MOD
: case UMOD
:
3631 /* We can apply the rules of arithmetic to compute the number of
3632 high- and low-order zero bits of these operations. We start by
3633 computing the width (position of the highest-order nonzero bit)
3634 and the number of low-order zero bits for each value. */
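        /* Worked example (hypothetical operand values): if nz0 = 0x0000ff00
           (width0 = 16, low0 = 8) and nz1 = 0x000000f0 (width1 = 8,
           low1 = 4), then for a MULT result_width = 16 + 8 = 24 and
           result_low = 8 + 4 = 12, so the result's nonzero bits lie
           within 0x00fff000.  */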
3636 unsigned HOST_WIDE_INT nz0
=
3637 cached_nonzero_bits (XEXP (x
, 0), mode
,
3638 known_x
, known_mode
, known_ret
);
3639 unsigned HOST_WIDE_INT nz1
=
3640 cached_nonzero_bits (XEXP (x
, 1), mode
,
3641 known_x
, known_mode
, known_ret
);
3642 int sign_index
= GET_MODE_BITSIZE (GET_MODE (x
)) - 1;
3643 int width0
= floor_log2 (nz0
) + 1;
3644 int width1
= floor_log2 (nz1
) + 1;
3645 int low0
= floor_log2 (nz0
& -nz0
);
3646 int low1
= floor_log2 (nz1
& -nz1
);
3647 HOST_WIDE_INT op0_maybe_minusp
3648 = (nz0
& ((HOST_WIDE_INT
) 1 << sign_index
));
3649 HOST_WIDE_INT op1_maybe_minusp
3650 = (nz1
& ((HOST_WIDE_INT
) 1 << sign_index
));
3651 unsigned int result_width
= mode_width
;
3657 result_width
= MAX (width0
, width1
) + 1;
3658 result_low
= MIN (low0
, low1
);
3661 result_low
= MIN (low0
, low1
);
3664 result_width
= width0
+ width1
;
3665 result_low
= low0
+ low1
;
3670 if (! op0_maybe_minusp
&& ! op1_maybe_minusp
)
3671 result_width
= width0
;
3676 result_width
= width0
;
3681 if (! op0_maybe_minusp
&& ! op1_maybe_minusp
)
3682 result_width
= MIN (width0
, width1
);
3683 result_low
= MIN (low0
, low1
);
3688 result_width
= MIN (width0
, width1
);
3689 result_low
= MIN (low0
, low1
);
3695 if (result_width
< mode_width
)
3696 nonzero
&= ((HOST_WIDE_INT
) 1 << result_width
) - 1;
3699 nonzero
&= ~(((HOST_WIDE_INT
) 1 << result_low
) - 1);
3701 #ifdef POINTERS_EXTEND_UNSIGNED
3702 /* If pointers extend unsigned and this is an addition or subtraction
3703 to a pointer in Pmode, all the bits above ptr_mode are known to be
3705 if (POINTERS_EXTEND_UNSIGNED
> 0 && GET_MODE (x
) == Pmode
3706 && (code
== PLUS
|| code
== MINUS
)
3707 && REG_P (XEXP (x
, 0)) && REG_POINTER (XEXP (x
, 0)))
3708 nonzero
&= GET_MODE_MASK (ptr_mode
);
3714 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
3715 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
)
3716 nonzero
&= ((HOST_WIDE_INT
) 1 << INTVAL (XEXP (x
, 1))) - 1;
3720 /* If this is a SUBREG formed for a promoted variable that has
3721 been zero-extended, we know that at least the high-order bits
3722 are zero, though others might be too. */
3724 if (SUBREG_PROMOTED_VAR_P (x
) && SUBREG_PROMOTED_UNSIGNED_P (x
) > 0)
3725 nonzero
= GET_MODE_MASK (GET_MODE (x
))
3726 & cached_nonzero_bits (SUBREG_REG (x
), GET_MODE (x
),
3727 known_x
, known_mode
, known_ret
);
3729 /* If the inner mode is a single word for both the host and target
3730 machines, we can compute this from which bits of the inner
3731 object might be nonzero. */
3732 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x
))) <= BITS_PER_WORD
3733 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x
)))
3734 <= HOST_BITS_PER_WIDE_INT
))
3736 nonzero
&= cached_nonzero_bits (SUBREG_REG (x
), mode
,
3737 known_x
, known_mode
, known_ret
);
3739 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
3740 /* If this is a typical RISC machine, we only have to worry
3741 about the way loads are extended. */
3742 if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
3744 & (((unsigned HOST_WIDE_INT
) 1
3745 << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x
))) - 1))))
3747 : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) != ZERO_EXTEND
)
3748 || !MEM_P (SUBREG_REG (x
)))
3751 /* On many CISC machines, accessing an object in a wider mode
3752 causes the high-order bits to become undefined. So they are
3753 not known to be zero. */
3754 if (GET_MODE_SIZE (GET_MODE (x
))
3755 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
))))
3756 nonzero
|= (GET_MODE_MASK (GET_MODE (x
))
3757 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x
))));
3766 /* The nonzero bits are in two classes: any bits within MODE
3767 that aren't in GET_MODE (x) are always significant. The rest of the
3768 nonzero bits are those that are significant in the operand of
3769 the shift when shifted the appropriate number of bits. This
3770 shows that high-order bits are cleared by the right shift and
3771 low-order bits by left shifts. */
3772 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
3773 && INTVAL (XEXP (x
, 1)) >= 0
3774 && INTVAL (XEXP (x
, 1)) < HOST_BITS_PER_WIDE_INT
)
3776 enum machine_mode inner_mode
= GET_MODE (x
);
3777 unsigned int width
= GET_MODE_BITSIZE (inner_mode
);
3778 int count
= INTVAL (XEXP (x
, 1));
3779 unsigned HOST_WIDE_INT mode_mask
= GET_MODE_MASK (inner_mode
);
3780 unsigned HOST_WIDE_INT op_nonzero
=
3781 cached_nonzero_bits (XEXP (x
, 0), mode
,
3782 known_x
, known_mode
, known_ret
);
3783 unsigned HOST_WIDE_INT inner
= op_nonzero
& mode_mask
;
3784 unsigned HOST_WIDE_INT outer
= 0;
3786 if (mode_width
> width
)
3787 outer
= (op_nonzero
& nonzero
& ~mode_mask
);
3789 if (code
== LSHIFTRT
)
3791 else if (code
== ASHIFTRT
)
3795 /* If the sign bit may have been nonzero before the shift, we
3796 need to mark all the places it could have been copied to
3797 by the shift as possibly nonzero. */
3798 if (inner
& ((HOST_WIDE_INT
) 1 << (width
- 1 - count
)))
3799 inner
|= (((HOST_WIDE_INT
) 1 << count
) - 1) << (width
- count
);
3801 else if (code
== ASHIFT
)
3804 inner
= ((inner
<< (count
% width
)
3805 | (inner
>> (width
- (count
% width
)))) & mode_mask
);
3807 nonzero
&= (outer
| inner
);
3813 /* This is at most the number of bits in the mode. */
3814 nonzero
= ((HOST_WIDE_INT
) 2 << (floor_log2 (mode_width
))) - 1;
3818 /* If CLZ has a known value at zero, then the nonzero bits are
3819 that value, plus the number of bits in the mode minus one. */
3820 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, nonzero
))
3821 nonzero
|= ((HOST_WIDE_INT
) 1 << (floor_log2 (mode_width
))) - 1;
3827 /* If CTZ has a known value at zero, then the nonzero bits are
3828 that value, plus the number of bits in the mode minus one. */
3829 if (CTZ_DEFINED_VALUE_AT_ZERO (mode
, nonzero
))
3830 nonzero
|= ((HOST_WIDE_INT
) 1 << (floor_log2 (mode_width
))) - 1;
3841 unsigned HOST_WIDE_INT nonzero_true
=
3842 cached_nonzero_bits (XEXP (x
, 1), mode
,
3843 known_x
, known_mode
, known_ret
);
3845 /* Don't call nonzero_bits for the second time if it cannot change
3847 if ((nonzero
& nonzero_true
) != nonzero
)
3848 nonzero
&= nonzero_true
3849 | cached_nonzero_bits (XEXP (x
, 2), mode
,
3850 known_x
, known_mode
, known_ret
);
3861 /* See the macro definition above. */
3862 #undef cached_num_sign_bit_copies
3865 /* The function cached_num_sign_bit_copies is a wrapper around
3866 num_sign_bit_copies1. It avoids exponential behavior in
3867 num_sign_bit_copies1 when X has identical subexpressions on the
3868 first or the second level. */
3871 cached_num_sign_bit_copies (rtx x
, enum machine_mode mode
, rtx known_x
,
3872 enum machine_mode known_mode
,
3873 unsigned int known_ret
)
3875 if (x
== known_x
&& mode
== known_mode
)
3878 /* Try to find identical subexpressions. If found call
3879 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
3880 the precomputed value for the subexpression as KNOWN_RET. */
3882 if (ARITHMETIC_P (x
))
3884 rtx x0
= XEXP (x
, 0);
3885 rtx x1
= XEXP (x
, 1);
3887 /* Check the first level. */
3890 num_sign_bit_copies1 (x
, mode
, x0
, mode
,
3891 cached_num_sign_bit_copies (x0
, mode
, known_x
,
3895 /* Check the second level. */
3896 if (ARITHMETIC_P (x0
)
3897 && (x1
== XEXP (x0
, 0) || x1
== XEXP (x0
, 1)))
3899 num_sign_bit_copies1 (x
, mode
, x1
, mode
,
3900 cached_num_sign_bit_copies (x1
, mode
, known_x
,
3904 if (ARITHMETIC_P (x1
)
3905 && (x0
== XEXP (x1
, 0) || x0
== XEXP (x1
, 1)))
3907 num_sign_bit_copies1 (x
, mode
, x0
, mode
,
3908 cached_num_sign_bit_copies (x0
, mode
, known_x
,
3913 return num_sign_bit_copies1 (x
, mode
, known_x
, known_mode
, known_ret
);
3916 /* Return the number of bits at the high-order end of X that are known to
3917 be equal to the sign bit. X will be used in mode MODE; if MODE is
3918 VOIDmode, X will be used in its own mode. The returned value will always
3919 be between 1 and the number of bits in MODE. */
3922 num_sign_bit_copies1 (rtx x
, enum machine_mode mode
, rtx known_x
,
3923 enum machine_mode known_mode
,
3924 unsigned int known_ret
)
3926 enum rtx_code code
= GET_CODE (x
);
3927 unsigned int bitwidth
= GET_MODE_BITSIZE (mode
);
3928 int num0
, num1
, result
;
3929 unsigned HOST_WIDE_INT nonzero
;
  /* If we weren't given a mode, use the mode of X.  If the mode is still
     VOIDmode, we don't know anything.  Likewise if one of the modes is
     floating-point.  */
3935 if (mode
== VOIDmode
)
3936 mode
= GET_MODE (x
);
3938 if (mode
== VOIDmode
|| FLOAT_MODE_P (mode
) || FLOAT_MODE_P (GET_MODE (x
)))
3941 /* For a smaller object, just ignore the high bits. */
3942 if (bitwidth
< GET_MODE_BITSIZE (GET_MODE (x
)))
3944 num0
= cached_num_sign_bit_copies (x
, GET_MODE (x
),
3945 known_x
, known_mode
, known_ret
);
3947 num0
- (int) (GET_MODE_BITSIZE (GET_MODE (x
)) - bitwidth
));
3950 if (GET_MODE (x
) != VOIDmode
&& bitwidth
> GET_MODE_BITSIZE (GET_MODE (x
)))
3952 #ifndef WORD_REGISTER_OPERATIONS
3953 /* If this machine does not do all register operations on the entire
3954 register and MODE is wider than the mode of X, we can say nothing
3955 at all about the high-order bits. */
3958 /* Likewise on machines that do, if the mode of the object is smaller
3959 than a word and loads of that size don't sign extend, we can say
3960 nothing about the high order bits. */
3961 if (GET_MODE_BITSIZE (GET_MODE (x
)) < BITS_PER_WORD
3962 #ifdef LOAD_EXTEND_OP
3963 && LOAD_EXTEND_OP (GET_MODE (x
)) != SIGN_EXTEND
3974 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3975 /* If pointers extend signed and this is a pointer in Pmode, say that
3976 all the bits above ptr_mode are known to be sign bit copies. */
3977 if (! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
&& mode
== Pmode
3979 return GET_MODE_BITSIZE (Pmode
) - GET_MODE_BITSIZE (ptr_mode
) + 1;
3983 unsigned int copies_for_hook
= 1, copies
= 1;
3984 rtx
new = rtl_hooks
.reg_num_sign_bit_copies (x
, mode
, known_x
,
3985 known_mode
, known_ret
,
3989 copies
= cached_num_sign_bit_copies (new, mode
, known_x
,
3990 known_mode
, known_ret
);
3992 if (copies
> 1 || copies_for_hook
> 1)
3993 return MAX (copies
, copies_for_hook
);
3995 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
4000 #ifdef LOAD_EXTEND_OP
4001 /* Some RISC machines sign-extend all loads of smaller than a word. */
4002 if (LOAD_EXTEND_OP (GET_MODE (x
)) == SIGN_EXTEND
)
4003 return MAX (1, ((int) bitwidth
4004 - (int) GET_MODE_BITSIZE (GET_MODE (x
)) + 1));
4009 /* If the constant is negative, take its 1's complement and remask.
4010 Then see how many zero bits we have. */
4011 nonzero
= INTVAL (x
) & GET_MODE_MASK (mode
);
4012 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4013 && (nonzero
& ((HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4014 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4016 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
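      /* Worked example (hypothetical constant): for x = (const_int -4) in a
         32-bit MODE, nonzero becomes 0x00000003 after complementing, so the
         result is 32 - floor_log2 (3) - 1 = 30 sign-bit copies.  */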
4019 /* If this is a SUBREG for a promoted object that is sign-extended
4020 and we are looking at it in a wider mode, we know that at least the
4021 high-order bits are known to be sign bit copies. */
4023 if (SUBREG_PROMOTED_VAR_P (x
) && ! SUBREG_PROMOTED_UNSIGNED_P (x
))
4025 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4026 known_x
, known_mode
, known_ret
);
4027 return MAX ((int) bitwidth
4028 - (int) GET_MODE_BITSIZE (GET_MODE (x
)) + 1,
4032 /* For a smaller object, just ignore the high bits. */
4033 if (bitwidth
<= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x
))))
4035 num0
= cached_num_sign_bit_copies (SUBREG_REG (x
), VOIDmode
,
4036 known_x
, known_mode
, known_ret
);
4037 return MAX (1, (num0
4038 - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x
)))
4042 #ifdef WORD_REGISTER_OPERATIONS
4043 #ifdef LOAD_EXTEND_OP
4044 /* For paradoxical SUBREGs on machines where all register operations
4045 affect the entire register, just look inside. Note that we are
4046 passing MODE to the recursive call, so the number of sign bit copies
4047 will remain relative to that mode, not the inner mode. */
4049 /* This works only if loads sign extend. Otherwise, if we get a
4050 reload for the inner part, it may be loaded from the stack, and
4051 then we lose all sign bit copies that existed before the store
4054 if ((GET_MODE_SIZE (GET_MODE (x
))
4055 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x
))))
4056 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x
))) == SIGN_EXTEND
4057 && MEM_P (SUBREG_REG (x
)))
4058 return cached_num_sign_bit_copies (SUBREG_REG (x
), mode
,
4059 known_x
, known_mode
, known_ret
);
4065 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4066 return MAX (1, (int) bitwidth
- INTVAL (XEXP (x
, 1)));
4070 return (bitwidth
- GET_MODE_BITSIZE (GET_MODE (XEXP (x
, 0)))
4071 + cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4072 known_x
, known_mode
, known_ret
));
4075 /* For a smaller object, just ignore the high bits. */
4076 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), VOIDmode
,
4077 known_x
, known_mode
, known_ret
);
4078 return MAX (1, (num0
- (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x
, 0)))
4082 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4083 known_x
, known_mode
, known_ret
);
4085 case ROTATE
: case ROTATERT
:
4086 /* If we are rotating left by a number of bits less than the number
4087 of sign bit copies, we can just subtract that amount from the
4089 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
4090 && INTVAL (XEXP (x
, 1)) >= 0
4091 && INTVAL (XEXP (x
, 1)) < (int) bitwidth
)
4093 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4094 known_x
, known_mode
, known_ret
);
4095 return MAX (1, num0
- (code
== ROTATE
? INTVAL (XEXP (x
, 1))
4096 : (int) bitwidth
- INTVAL (XEXP (x
, 1))));
4101 /* In general, this subtracts one sign bit copy. But if the value
4102 is known to be positive, the number of sign bit copies is the
4103 same as that of the input. Finally, if the input has just one bit
4104 that might be nonzero, all the bits are copies of the sign bit. */
4105 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4106 known_x
, known_mode
, known_ret
);
4107 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4108 return num0
> 1 ? num0
- 1 : 1;
4110 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4115 && (((HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
))
4120 case IOR
: case AND
: case XOR
:
4121 case SMIN
: case SMAX
: case UMIN
: case UMAX
:
4122 /* Logical operations will preserve the number of sign-bit copies.
4123 MIN and MAX operations always return one of the operands. */
4124 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4125 known_x
, known_mode
, known_ret
);
4126 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4127 known_x
, known_mode
, known_ret
);
4128 return MIN (num0
, num1
);
4130 case PLUS
: case MINUS
:
4131 /* For addition and subtraction, we can have a 1-bit carry. However,
4132 if we are subtracting 1 from a positive number, there will not
4133 be such a carry. Furthermore, if the positive number is known to
4134 be 0 or 1, we know the result is either -1 or 0. */
4136 if (code
== PLUS
&& XEXP (x
, 1) == constm1_rtx
4137 && bitwidth
<= HOST_BITS_PER_WIDE_INT
)
4139 nonzero
= nonzero_bits (XEXP (x
, 0), mode
);
4140 if ((((HOST_WIDE_INT
) 1 << (bitwidth
- 1)) & nonzero
) == 0)
4141 return (nonzero
== 1 || nonzero
== 0 ? bitwidth
4142 : bitwidth
- floor_log2 (nonzero
) - 1);
4145 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4146 known_x
, known_mode
, known_ret
);
4147 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4148 known_x
, known_mode
, known_ret
);
4149 result
= MAX (1, MIN (num0
, num1
) - 1);
4151 #ifdef POINTERS_EXTEND_UNSIGNED
4152 /* If pointers extend signed and this is an addition or subtraction
4153 to a pointer in Pmode, all the bits above ptr_mode are known to be
4155 if (! POINTERS_EXTEND_UNSIGNED
&& GET_MODE (x
) == Pmode
4156 && (code
== PLUS
|| code
== MINUS
)
4157 && REG_P (XEXP (x
, 0)) && REG_POINTER (XEXP (x
, 0)))
4158 result
= MAX ((int) (GET_MODE_BITSIZE (Pmode
)
4159 - GET_MODE_BITSIZE (ptr_mode
) + 1),
4165 /* The number of bits of the product is the sum of the number of
     bits of both terms.  However, unless one of the terms is known
4167 to be positive, we must allow for an additional bit since negating
4168 a negative number can remove one sign bit copy. */
4170 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4171 known_x
, known_mode
, known_ret
);
4172 num1
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4173 known_x
, known_mode
, known_ret
);
4175 result
= bitwidth
- (bitwidth
- num0
) - (bitwidth
- num1
);
4177 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4178 || (((nonzero_bits (XEXP (x
, 0), mode
)
4179 & ((HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4180 && ((nonzero_bits (XEXP (x
, 1), mode
)
4181 & ((HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0))))
4184 return MAX (1, result
);
4187 /* The result must be <= the first operand. If the first operand
4188 has the high bit set, we know nothing about the number of sign
4190 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4192 else if ((nonzero_bits (XEXP (x
, 0), mode
)
4193 & ((HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4196 return cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4197 known_x
, known_mode
, known_ret
);
4200 /* The result must be <= the second operand. */
4201 return cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4202 known_x
, known_mode
, known_ret
);
4205 /* Similar to unsigned division, except that we have to worry about
4206 the case where the divisor is negative, in which case we have
4208 result
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4209 known_x
, known_mode
, known_ret
);
4211 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4212 || (nonzero_bits (XEXP (x
, 1), mode
)
4213 & ((HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0))
4219 result
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4220 known_x
, known_mode
, known_ret
);
4222 && (bitwidth
> HOST_BITS_PER_WIDE_INT
4223 || (nonzero_bits (XEXP (x
, 1), mode
)
4224 & ((HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0))
4230 /* Shifts by a constant add to the number of bits equal to the
4232 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4233 known_x
, known_mode
, known_ret
);
4234 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
4235 && INTVAL (XEXP (x
, 1)) > 0)
4236 num0
= MIN ((int) bitwidth
, num0
+ INTVAL (XEXP (x
, 1)));
4241 /* Left shifts destroy copies. */
4242 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
4243 || INTVAL (XEXP (x
, 1)) < 0
4244 || INTVAL (XEXP (x
, 1)) >= (int) bitwidth
)
4247 num0
= cached_num_sign_bit_copies (XEXP (x
, 0), mode
,
4248 known_x
, known_mode
, known_ret
);
4249 return MAX (1, num0
- INTVAL (XEXP (x
, 1)));
4252 num0
= cached_num_sign_bit_copies (XEXP (x
, 1), mode
,
4253 known_x
, known_mode
, known_ret
);
4254 num1
= cached_num_sign_bit_copies (XEXP (x
, 2), mode
,
4255 known_x
, known_mode
, known_ret
);
4256 return MIN (num0
, num1
);
4258 case EQ
: case NE
: case GE
: case GT
: case LE
: case LT
:
4259 case UNEQ
: case LTGT
: case UNGE
: case UNGT
: case UNLE
: case UNLT
:
4260 case GEU
: case GTU
: case LEU
: case LTU
:
4261 case UNORDERED
: case ORDERED
:
4262 /* If the constant is negative, take its 1's complement and remask.
4263 Then see how many zero bits we have. */
4264 nonzero
= STORE_FLAG_VALUE
;
4265 if (bitwidth
<= HOST_BITS_PER_WIDE_INT
4266 && (nonzero
& ((HOST_WIDE_INT
) 1 << (bitwidth
- 1))) != 0)
4267 nonzero
= (~nonzero
) & GET_MODE_MASK (mode
);
4269 return (nonzero
== 0 ? bitwidth
: bitwidth
- floor_log2 (nonzero
) - 1);
4275 /* If we haven't been able to figure it out by one of the above rules,
4276 see if some of the high-order bits are known to be zero. If so,
4277 count those bits and return one less than that amount. If we can't
4278 safely compute the mask for this mode, always return BITWIDTH. */
4280 bitwidth
= GET_MODE_BITSIZE (mode
);
4281 if (bitwidth
> HOST_BITS_PER_WIDE_INT
)
4284 nonzero
= nonzero_bits (x
, mode
);
4285 return nonzero
& ((HOST_WIDE_INT
) 1 << (bitwidth
- 1))
4286 ? 1 : bitwidth
- floor_log2 (nonzero
) - 1;
4289 /* Calculate the rtx_cost of a single instruction. A return value of
4290 zero indicates an instruction pattern without a known cost. */
4293 insn_rtx_cost (rtx pat
)
4298 /* Extract the single set rtx from the instruction pattern.
4299 We can't use single_set since we only have the pattern. */
4300 if (GET_CODE (pat
) == SET
)
4302 else if (GET_CODE (pat
) == PARALLEL
)
4305 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
4307 rtx x
= XVECEXP (pat
, 0, i
);
4308 if (GET_CODE (x
) == SET
)
4321 cost
= rtx_cost (SET_SRC (set
), SET
);
4322 return cost
> 0 ? cost
: COSTS_N_INSNS (1);
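/* Usage sketch (hypothetical caller): a transformation deciding whether to
   speculate an instruction can bound the work it is willing to do with
   something like

     if (insn_rtx_cost (PATTERN (insn)) > COSTS_N_INSNS (max_cost))
       return FALSE;

   where max_cost is whatever budget that caller has chosen.  */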
4325 /* Given an insn INSN and condition COND, return the condition in a
4326 canonical form to simplify testing by callers. Specifically:
4328 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4329 (2) Both operands will be machine operands; (cc0) will have been replaced.
4330 (3) If an operand is a constant, it will be the second operand.
4331 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4332 for GE, GEU, and LEU.
4334 If the condition cannot be understood, or is an inequality floating-point
4335 comparison which needs to be reversed, 0 will be returned.
   If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4339 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4340 insn used in locating the condition was found. If a replacement test
4341 of the condition is desired, it should be placed in front of that
4342 insn and we will be sure that the inputs are still valid.
4344 If WANT_REG is nonzero, we wish the condition to be relative to that
4345 register, if possible. Therefore, do not canonicalize the condition
4346 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4347 to be a compare to a CC mode register.
   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */
4353 canonicalize_condition (rtx insn
, rtx cond
, int reverse
, rtx
*earliest
,
4354 rtx want_reg
, int allow_cc_mode
, int valid_at_insn_p
)
4361 int reverse_code
= 0;
4362 enum machine_mode mode
;
4363 basic_block bb
= BLOCK_FOR_INSN (insn
);
4365 code
= GET_CODE (cond
);
4366 mode
= GET_MODE (cond
);
4367 op0
= XEXP (cond
, 0);
4368 op1
= XEXP (cond
, 1);
4371 code
= reversed_comparison_code (cond
, insn
);
4372 if (code
== UNKNOWN
)
4378 /* If we are comparing a register with zero, see if the register is set
4379 in the previous insn to a COMPARE or a comparison operation. Perform
4380 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4383 while ((GET_RTX_CLASS (code
) == RTX_COMPARE
4384 || GET_RTX_CLASS (code
) == RTX_COMM_COMPARE
)
4385 && op1
== CONST0_RTX (GET_MODE (op0
))
4388 /* Set nonzero when we find something of interest. */
4392 /* If comparison with cc0, import actual comparison from compare
4396 if ((prev
= prev_nonnote_insn (prev
)) == 0
4397 || !NONJUMP_INSN_P (prev
)
4398 || (set
= single_set (prev
)) == 0
4399 || SET_DEST (set
) != cc0_rtx
)
4402 op0
= SET_SRC (set
);
4403 op1
= CONST0_RTX (GET_MODE (op0
));
4409 /* If this is a COMPARE, pick up the two things being compared. */
4410 if (GET_CODE (op0
) == COMPARE
)
4412 op1
= XEXP (op0
, 1);
4413 op0
= XEXP (op0
, 0);
4416 else if (!REG_P (op0
))
4419 /* Go back to the previous insn. Stop if it is not an INSN. We also
4420 stop if it isn't a single set or if it has a REG_INC note because
4421 we don't want to bother dealing with it. */
4423 if ((prev
= prev_nonnote_insn (prev
)) == 0
4424 || !NONJUMP_INSN_P (prev
)
4425 || FIND_REG_INC_NOTE (prev
, NULL_RTX
)
4426 /* In cfglayout mode, there do not have to be labels at the
4427 beginning of a block, or jumps at the end, so the previous
4428 conditions would not stop us when we reach bb boundary. */
4429 || BLOCK_FOR_INSN (prev
) != bb
)
4432 set
= set_of (op0
, prev
);
4435 && (GET_CODE (set
) != SET
4436 || !rtx_equal_p (SET_DEST (set
), op0
)))
4439 /* If this is setting OP0, get what it sets it to if it looks
4443 enum machine_mode inner_mode
= GET_MODE (SET_DEST (set
));
4444 #ifdef FLOAT_STORE_FLAG_VALUE
4445 REAL_VALUE_TYPE fsfv
;
4448 /* ??? We may not combine comparisons done in a CCmode with
4449 comparisons not done in a CCmode. This is to aid targets
4450 like Alpha that have an IEEE compliant EQ instruction, and
4451 a non-IEEE compliant BEQ instruction. The use of CCmode is
4452 actually artificial, simply to prevent the combination, but
4453 should not affect other platforms.
4455 However, we must allow VOIDmode comparisons to match either
4456 CCmode or non-CCmode comparison, because some ports have
4457 modeless comparisons inside branch patterns.
4459 ??? This mode check should perhaps look more like the mode check
4460 in simplify_comparison in combine. */
4462 if ((GET_CODE (SET_SRC (set
)) == COMPARE
4465 && GET_MODE_CLASS (inner_mode
) == MODE_INT
4466 && (GET_MODE_BITSIZE (inner_mode
)
4467 <= HOST_BITS_PER_WIDE_INT
)
4468 && (STORE_FLAG_VALUE
4469 & ((HOST_WIDE_INT
) 1
4470 << (GET_MODE_BITSIZE (inner_mode
) - 1))))
4471 #ifdef FLOAT_STORE_FLAG_VALUE
4473 && SCALAR_FLOAT_MODE_P (inner_mode
)
4474 && (fsfv
= FLOAT_STORE_FLAG_VALUE (inner_mode
),
4475 REAL_VALUE_NEGATIVE (fsfv
)))
4478 && COMPARISON_P (SET_SRC (set
))))
4479 && (((GET_MODE_CLASS (mode
) == MODE_CC
)
4480 == (GET_MODE_CLASS (inner_mode
) == MODE_CC
))
4481 || mode
== VOIDmode
|| inner_mode
== VOIDmode
))
4483 else if (((code
== EQ
4485 && (GET_MODE_BITSIZE (inner_mode
)
4486 <= HOST_BITS_PER_WIDE_INT
)
4487 && GET_MODE_CLASS (inner_mode
) == MODE_INT
4488 && (STORE_FLAG_VALUE
4489 & ((HOST_WIDE_INT
) 1
4490 << (GET_MODE_BITSIZE (inner_mode
) - 1))))
4491 #ifdef FLOAT_STORE_FLAG_VALUE
4493 && SCALAR_FLOAT_MODE_P (inner_mode
)
4494 && (fsfv
= FLOAT_STORE_FLAG_VALUE (inner_mode
),
4495 REAL_VALUE_NEGATIVE (fsfv
)))
4498 && COMPARISON_P (SET_SRC (set
))
4499 && (((GET_MODE_CLASS (mode
) == MODE_CC
)
4500 == (GET_MODE_CLASS (inner_mode
) == MODE_CC
))
4501 || mode
== VOIDmode
|| inner_mode
== VOIDmode
))
4511 else if (reg_set_p (op0
, prev
))
4512 /* If this sets OP0, but not directly, we have to give up. */
4517 /* If the caller is expecting the condition to be valid at INSN,
4518 make sure X doesn't change before INSN. */
4519 if (valid_at_insn_p
)
4520 if (modified_in_p (x
, prev
) || modified_between_p (x
, prev
, insn
))
4522 if (COMPARISON_P (x
))
4523 code
= GET_CODE (x
);
4526 code
= reversed_comparison_code (x
, prev
);
4527 if (code
== UNKNOWN
)
4532 op0
= XEXP (x
, 0), op1
= XEXP (x
, 1);
4538 /* If constant is first, put it last. */
4539 if (CONSTANT_P (op0
))
4540 code
= swap_condition (code
), tem
= op0
, op0
= op1
, op1
= tem
;
4542 /* If OP0 is the result of a comparison, we weren't able to find what
4543 was really being compared, so fail. */
4545 && GET_MODE_CLASS (GET_MODE (op0
)) == MODE_CC
)
4548 /* Canonicalize any ordered comparison with integers involving equality
4549 if we can do computations in the relevant mode and we do not
4552 if (GET_MODE_CLASS (GET_MODE (op0
)) != MODE_CC
4553 && GET_CODE (op1
) == CONST_INT
4554 && GET_MODE (op0
) != VOIDmode
4555 && GET_MODE_BITSIZE (GET_MODE (op0
)) <= HOST_BITS_PER_WIDE_INT
)
4557 HOST_WIDE_INT const_val
= INTVAL (op1
);
4558 unsigned HOST_WIDE_INT uconst_val
= const_val
;
4559 unsigned HOST_WIDE_INT max_val
4560 = (unsigned HOST_WIDE_INT
) GET_MODE_MASK (GET_MODE (op0
));
4565 if ((unsigned HOST_WIDE_INT
) const_val
!= max_val
>> 1)
4566 code
= LT
, op1
= gen_int_mode (const_val
+ 1, GET_MODE (op0
));
4569 /* When cross-compiling, const_val might be sign-extended from
4570 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
4572 if ((HOST_WIDE_INT
) (const_val
& max_val
)
4573 != (((HOST_WIDE_INT
) 1
4574 << (GET_MODE_BITSIZE (GET_MODE (op0
)) - 1))))
4575 code
= GT
, op1
= gen_int_mode (const_val
- 1, GET_MODE (op0
));
4579 if (uconst_val
< max_val
)
4580 code
= LTU
, op1
= gen_int_mode (uconst_val
+ 1, GET_MODE (op0
));
4584 if (uconst_val
!= 0)
4585 code
= GTU
, op1
= gen_int_mode (uconst_val
- 1, GET_MODE (op0
));
4593 /* Never return CC0; return zero instead. */
4597 return gen_rtx_fmt_ee (code
, VOIDmode
, op0
, op1
);
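/* Worked example of rule (4) above (hypothetical operands): a condition
   (le (reg:SI 100) (const_int 4)) is returned as
   (lt (reg:SI 100) (const_int 5)), and (geu (reg:SI 100) (const_int 4))
   becomes (gtu (reg:SI 100) (const_int 3)).  */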
4600 /* Given a jump insn JUMP, return the condition that will cause it to branch
4601 to its JUMP_LABEL. If the condition cannot be understood, or is an
4602 inequality floating-point comparison which needs to be reversed, 0 will
4605 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4606 insn used in locating the condition was found. If a replacement test
4607 of the condition is desired, it should be placed in front of that
4608 insn and we will be sure that the inputs are still valid. If EARLIEST
4609 is null, the returned condition will be valid at INSN.
4611 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
4612 compare CC mode register.
4614 VALID_AT_INSN_P is the same as for canonicalize_condition. */
4617 get_condition (rtx jump
, rtx
*earliest
, int allow_cc_mode
, int valid_at_insn_p
)
4623 /* If this is not a standard conditional jump, we can't parse it. */
4625 || ! any_condjump_p (jump
))
4627 set
= pc_set (jump
);
4629 cond
= XEXP (SET_SRC (set
), 0);
4631 /* If this branches to JUMP_LABEL when the condition is false, reverse
4634 = GET_CODE (XEXP (SET_SRC (set
), 2)) == LABEL_REF
4635 && XEXP (XEXP (SET_SRC (set
), 2), 0) == JUMP_LABEL (jump
);
4637 return canonicalize_condition (jump
, cond
, reverse
, earliest
, NULL_RTX
,
4638 allow_cc_mode
, valid_at_insn_p
);
4641 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
4642 TARGET_MODE_REP_EXTENDED.
4644 Note that we assume that the property of
4645 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
4646 narrower than mode B. I.e., if A is a mode narrower than B then in
4647 order to be able to operate on it in mode B, mode A needs to
4648 satisfy the requirements set by the representation of mode B. */
4651 init_num_sign_bit_copies_in_rep (void)
4653 enum machine_mode mode
, in_mode
;
4655 for (in_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
); in_mode
!= VOIDmode
;
4656 in_mode
= GET_MODE_WIDER_MODE (mode
))
4657 for (mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
); mode
!= in_mode
;
4658 mode
= GET_MODE_WIDER_MODE (mode
))
4660 enum machine_mode i
;
4662 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
4663 extends to the next widest mode. */
4664 gcc_assert (targetm
.mode_rep_extended (mode
, in_mode
) == UNKNOWN
4665 || GET_MODE_WIDER_MODE (mode
) == in_mode
);
4667 /* We are in in_mode. Count how many bits outside of mode
4668 have to be copies of the sign-bit. */
4669 for (i
= mode
; i
!= in_mode
; i
= GET_MODE_WIDER_MODE (i
))
4671 enum machine_mode wider
= GET_MODE_WIDER_MODE (i
);
4673 if (targetm
.mode_rep_extended (i
, wider
) == SIGN_EXTEND
4674 /* We can only check sign-bit copies starting from the
4675 top-bit. In order to be able to check the bits we
4676 have already seen we pretend that subsequent bits
4677 have to be sign-bit copies too. */
4678 || num_sign_bit_copies_in_rep
[in_mode
][mode
])
4679 num_sign_bit_copies_in_rep
[in_mode
][mode
]
4680 += GET_MODE_BITSIZE (wider
) - GET_MODE_BITSIZE (i
);
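/* Worked example (hypothetical target): if TARGET_MODE_REP_EXTENDED
   (SImode, DImode) is SIGN_EXTEND and SImode/DImode are 32 and 64 bits
   wide, the loop records num_sign_bit_copies_in_rep[DImode][SImode] = 32,
   i.e. every bit above SImode must be a copy of the SImode sign bit.  */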
/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (enum machine_mode mode, rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
          >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}
/* Initialize non_rtx_starting_operands, which is used to speed up
   for_each_rtx.  */

void
init_rtlanal (void)
{
  int i;

  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      const char *format = GET_RTX_FORMAT (i);
      const char *first = strpbrk (format, "eEV");
      non_rtx_starting_operands[i] = first ? first - format : -1;
    }

  init_num_sign_bit_copies_in_rep ();
}

/* Check whether this is a constant pool constant.  */

bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return GET_CODE (x) == CONST_DOUBLE;
}